fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c
 *     -- uses Fox's algorithm to multiply two square matrices
 *
 * Implementation of parallel matrix multiplication:
 *     LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
 *
 * Input:
 *     Input matrix file names: A.dat, B.dat
 *
 * Output:
 *     Output matrix file name: C.dat
 *     Output sub-matrices file name: SubMatrices.dat
 *
 * Notes:
 *     1. Assumes the number of processes is a perfect square
 *     2. The array member of the matrices is statically allocated
 *
 * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
 */

/* Compile command:
 *     mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3
 *         fox_floats_timer_caching_omp_fileIO_benchmark.c
 *         -o fox_floats_timer_caching_omp_fileIO_benchmark
 *
 * Run command:
 *     mpirun -n 4 ./fox_floats_timer_caching_omp_fileIO_benchmark
 */

/* Header files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>

// Problem scale: matrix row/col size
#define PROBLEM_SCALE 256

// Whether or not to print the matrices on the command line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0

// Floating-point precision: 4-byte single precision or 8-byte double precision
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE

// Number of OpenMP threads used in the compute kernel
#define NUM_THREADS 16
// Thread affinity: "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"

/* Structure describing the process grid */
typedef struct {
    int      p;         /* Total number of processes    */
    MPI_Comm comm;      /* Communicator for entire grid */
    MPI_Comm row_comm;  /* Communicator for my row      */
    MPI_Comm col_comm;  /* Communicator for my col      */
    int      q;         /* Order of grid                */
    int      my_row;    /* My row number                */
    int      my_col;    /* My column number             */
    int      my_rank;   /* My rank in the grid comm     */
} GRID_INFO_T;

/* Structure holding a local matrix */
#define MAX 2097152     // Maximum number of elements in the local matrix array (2^21)
typedef struct {
    int n_bar;
#define Order(A) ((A)->n_bar)   // macro with parameters
    FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j)))  // macro with parameters, array dereference
} LOCAL_MATRIX_T;

/* Function declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
                   GRID_INFO_T* grid, int n);       // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B,   // for contiguous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
                   GRID_INFO_T* grid, int n);       // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
                    GRID_INFO_T* grid, int n);      // Print matrix A on the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B,   // Special print function for local matrix B^{T}(j,k)
                    GRID_INFO_T* grid, int n);      // Print matrix B on the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
                    GRID_INFO_T* grid, int n);      // Print matrix C on the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
                           LOCAL_MATRIX_T* local_B,
                           LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat;                           // global LOCAL_MATRIX_T* pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
                            GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B,   // Special print function for local matrix B^{T}(j,k)
                            GRID_INFO_T* grid);
void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_C,
                            GRID_INFO_T* grid);
void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
                    GRID_INFO_T* grid, int n);      // Write the matrix product to a file
void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
                            GRID_INFO_T* grid);     // Write local matrix A to a file
void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B,   // Special write function for local matrix B^{T}(j,k)
                            GRID_INFO_T* grid);     // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_C,
                            GRID_INFO_T* grid);     // Write local matrix C to a file

/*********************************************************/
int main(int argc, char* argv[]) {
    FILE*            fp;
    int              p;
    int              my_rank;
    GRID_INFO_T      grid;
    LOCAL_MATRIX_T*  local_A;
    LOCAL_MATRIX_T*  local_B;
    LOCAL_MATRIX_T*  local_C;
    int              n;
    int              n_bar;
    double           timer_start;
    double           timer_end;
    int              content;
    int              i;
    int              j;
    void Setup_grid(GRID_INFO_T* grid);
    void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
             LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);

    // Matrix generator
    fp = fopen("A.dat", "w");               // Generate and print matrix A (identity) into a file
    for (i = 0; i < PROBLEM_SCALE; i++) {
        for (j = 0; j < PROBLEM_SCALE; j++)
            if (i == j) {
                fprintf(fp, "%d ", 1);
            } else {
                fprintf(fp, "%d ", 0);
            }
        fprintf(fp, "\n");
    }
    fclose(fp);

    fp = fopen("B.dat", "w");               // Generate and print matrix B into a file
    for (i = 0; i < PROBLEM_SCALE; i++) {
        for (j = 0; j < PROBLEM_SCALE; j++)
            fprintf(fp, "%d ", (i*PROBLEM_SCALE)+j);
        fprintf(fp, "\n");
    }
    fclose(fp);

    // SPMD mode starts from here (processes fork from here)
    MPI_Init(&argc, &argv);                         // MPI initialization
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);        // Get my process id in the MPI communicator

    // Initialize the OpenMP environment
    omp_set_num_threads(NUM_THREADS);
    kmp_set_defaults(AFFINITY);                     // Intel OpenMP runtime extension

    Setup_grid(&grid);                              // Set up the process grid
    if (my_rank == 0) {
        fp = fopen("A.dat", "r");
        n = 0;
        while ((content = fgetc(fp)) != EOF) {
            // printf("fgetc = %d\n", content);
            if (content != 0x20 && content != 0x0A) // count characters that are neither space nor newline
                n++;
        }
        fclose(fp);
        n = (int) sqrt((double) n);
        printf("The order of the matrices read from A.dat is\n %d\n", n);
        // while (fgetc(fp) != EOF) n++;
        // printf("What's the order of the matrices?\n");
        // scanf("%d", &n);                         // Overall matrix order
    }

    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);   // Broadcast the overall matrix order
    n_bar = n/grid.q;                               // \bar{n} is the local matrix order

    local_A = Local_matrix_allocate(n_bar);         // Allocate local matrix A
    Order(local_A) = n_bar;                         // Local matrix A's order
    Read_matrix_A("Read A from A.dat", local_A, &grid, n);  // Process 0 reads local matrices A from the file and sends them to each process
    if (PRINT_A == 1)
        Print_matrix_A("We read A =", local_A, &grid, n);   // Gather local matrices A on process 0 and print them

    local_B = Local_matrix_allocate(n_bar);         // Allocate local matrix B
    Order(local_B) = n_bar;                         // Local matrix B's order
    Read_matrix_B("Read B from B.dat", local_B, &grid, n);  // Process 0 reads local matrix B as its local transpose from the file and sends it to each process
    if (PRINT_B == 1)
        Print_matrix_B("We read B =", local_B, &grid, n);   // Gather local matrix B (stored as its local transpose) on process 0 and print it

    Build_matrix_type(local_A);                     // Build local_A's MPI matrix datatype
    temp_mat = Local_matrix_allocate(n_bar);        // Allocate temporary matrix of order n_bar x n_bar
    local_C = Local_matrix_allocate(n_bar);         // Allocate matrix local_C
    Order(local_C) = n_bar;                         // Set matrix local_C's order

    MPI_Barrier(MPI_COMM_WORLD);                    // MPI process barrier
    timer_start = MPI_Wtime();                      // Get the MPI wall time
    Fox(n, &grid, local_A, local_B, local_C);       // Fox parallel matrix multiplication algorithm
    timer_end = MPI_Wtime();                        // Get the MPI wall time
    MPI_Barrier(MPI_COMM_WORLD);                    // MPI process barrier

    Write_matrix_C("Write C into the C.dat", local_C, &grid, n);    // Write matrix local_C (the product) to a file
    if (PRINT_C == 1)
        Print_matrix_C("The product is", local_C, &grid, n);        // Print matrix local_C (the product)

    Write_local_matrices_A("Write split of local matrix A into local_A.dat",
                           local_A, &grid);         // Write local matrix A to a file
    if (PRINT_LOCAL_A == 1)
        Print_local_matrices_A("Split of local matrix A", local_A, &grid);  // Print matrix A's split across processes
    Write_local_matrices_B("Write split of local matrix B into local_B.dat",
                           local_B, &grid);         // Write local matrix B to a file, special for row-major storage
    if (PRINT_LOCAL_B == 1)
        Print_local_matrices_B("Split of local matrix B", local_B, &grid);  // Print matrix B's split across processes, special for row-major storage
    Write_local_matrices_C("Write split of local matrix C into local_C.dat",
                           local_C, &grid);         // Write local matrix C to a file
    if (PRINT_LOCAL_C == 1)
        Print_local_matrices_C("Split of local matrix C", local_C, &grid);  // Print matrix C's split across processes

    Free_local_matrix(&local_A);                    // Free local matrix local_A
    Free_local_matrix(&local_B);                    // Free local matrix local_B
    Free_local_matrix(&local_C);                    // Free local matrix local_C

    if (my_rank == 0)
        printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n",
               timer_end - timer_start);

    MPI_Finalize();                                 // MPI finalize: processes join and resources are recycled
    return 0;
}  /* main */

/*********************************************************/
void Setup_grid(
        GRID_INFO_T* grid  /* out */) {
    int old_rank;
    int dimensions[2];
    int wrap_around[2];
    int coordinates[2];
    int free_coords[2];

    /* Set up global grid information */
    MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
    MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);

    /* We assume p is a perfect square */           // but what if it's not a perfect square?
    grid->q = (int) sqrt((double) grid->p);
    dimensions[0] = dimensions[1] = grid->q;

    /* We want a circular shift in the second dimension. */
    /* Don't care about the first.                       */
    wrap_around[0] = wrap_around[1] = 1;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm));
    MPI_Comm_rank(grid->comm, &(grid->my_rank));
    MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates);
    grid->my_row = coordinates[0];
    grid->my_col = coordinates[1];

    /* Set up row communicators */
    free_coords[0] = 0;
    free_coords[1] = 1;
    MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm));

    /* Set up column communicators */
    free_coords[0] = 1;
    free_coords[1] = 0;
    MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm));
}  /* Setup_grid */

/*********************************************************/
void Fox(
        int              n        /* in  */,
        GRID_INFO_T*     grid     /* in  */,
        LOCAL_MATRIX_T*  local_A  /* in  */,
        LOCAL_MATRIX_T*  local_B  /* in  */,
        LOCAL_MATRIX_T*  local_C  /* out */) {

    LOCAL_MATRIX_T*  temp_A;     /* Storage for the sub-    */
                                 /* matrix of A used during */
                                 /* the current stage       */
    int              stage;
    int              bcast_root;
    int              n_bar;      /* n/sqrt(p) */
    int              source;
    int              dest;
    MPI_Status       status;

    n_bar = n/grid->q;
    Set_to_zero(local_C);

    /* Calculate addresses for the row circular shift of B */
    source = (grid->my_row + 1) % grid->q;
    dest = (grid->my_row + grid->q - 1) % grid->q;

    /* Set aside storage for the broadcast block of A */
    temp_A = Local_matrix_allocate(n_bar);

    for (stage = 0; stage < grid->q; stage++) {
        bcast_root = (grid->my_row + stage) % grid->q;
        if (bcast_root == grid->my_col) {
            // Process P_{ii} broadcasts A_{ii} in the process grid's row communicator
            MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm);
            Local_matrix_multiply(local_A, local_B, local_C);
        } else {
            // temp_A is a buffer for process P_{ij} to store A_{ij}
            MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm);
            Local_matrix_multiply(temp_A, local_B, local_C);
        }
        MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t,    // MPI send and receive with a single buffer
                             dest, 0, source, 0, grid->col_comm, &status);
                             // Circular shift of B along the grid column, after the local multiplication
    }  /* for */
}  /* Fox */

/*********************************************************/
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
    LOCAL_MATRIX_T* temp;
    temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
    return temp;
}  /* Local_matrix_allocate */

/*********************************************************/
void Free_local_matrix(
        LOCAL_MATRIX_T** local_A_ptr  /* in/out */) {
    free(*local_A_ptr);
}  /* Free_local_matrix */

/*********************************************************/
/* Read and distribute matrix A:
 *     for each global row of the matrix,
 *         for each grid column,
 *             read a block of n_bar floats on process 0
 *             and send them to the appropriate process.
 */
void Read_matrix_A(
        char*            prompt   /* in  */,
        LOCAL_MATRIX_T*  local_A  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {

    FILE*       fp;
    int         mat_row, mat_col;
    int         grid_row, grid_col;
    int         dest;
    int         coords[2];
    FLOAT*      temp;
    MPI_Status  status;

    if (grid->my_rank == 0) {
        // Process 0 reads the matrix from the file and sends blocks to the other processes
        fp = fopen("A.dat", "r");
        temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
        printf("%s\n", prompt);
        fflush(stdout);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_A);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &dest);
                if (dest == 0) {
                    for (mat_col = 0; mat_col < Order(local_A); mat_col++)
                        fscanf(fp, "%lf",
                               (local_A->entries) + mat_row*Order(local_A) + mat_col);
                        /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */
                } else {
                    for (mat_col = 0; mat_col < Order(local_A); mat_col++)
                        fscanf(fp, "%lf", temp + mat_col);
                        // scanf("%lf", temp + mat_col);
                    MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm);
                }
            }
        }
        free(temp);
        fclose(fp);
    } else {
        // The other processes receive the matrix from process 0
        for (mat_row = 0; mat_row < Order(local_A); mat_row++)
            MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI,
                     0, 0, grid->comm, &status);
    }
}  /* Read_matrix_A */

/*********************************************************/
/* Read and distribute matrix B as the local transpose:
 *     for each global row of the matrix,
 *         for each grid column,
 *             read a block of n_bar floats on process 0
 *             and send them to the appropriate process.
 */
void Read_matrix_B(
        char*            prompt   /* in  */,
        LOCAL_MATRIX_T*  local_B  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {

    FILE*       fp;
    int         mat_row, mat_col;
    int         grid_row, grid_col;
    int         dest;
    int         coords[2];
    FLOAT*      temp;
    MPI_Status  status;

    if (grid->my_rank == 0) {
        // Process 0 reads the matrix from the file and sends blocks to the other processes
        fp = fopen("B.dat", "r");
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        printf("%s\n", prompt);
        fflush(stdout);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_B);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &dest);
                if (dest == 0) {    // process 0 (local)
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        fscanf(fp, "%lf",
                               (local_B->entries) + mat_col*Order(local_B) + mat_row);
                        // switch rows and columns in local_B, for column-major storage
                } else {
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        fscanf(fp, "%lf", temp + mat_col);
                    MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm);
                }
            }
        }
        free(temp);
        fclose(fp);
    } else {
        // The other processes receive the matrix from process 0
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
            MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status);
            for (mat_row = 0; mat_row < Order(local_B); mat_row++)
                Entry(local_B, mat_row, mat_col) = *(temp + mat_row);
                // switch rows and columns in local_B, for column-major storage
            /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI,
                        0, 0, grid->comm, &status); */
        }
        free(temp);
    }
}  /* Read_matrix_B */

/*********************************************************/
/* Receive and print matrix A:
 *     for each global row of the matrix,
 *         for each grid column,
 *             send n_bar floats to process 0 from each other process;
 *             receive a block of n_bar floats on process 0 and print it.
 */
void Print_matrix_A(
        char*            title    /* in  */,
        LOCAL_MATRIX_T*  local_A  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {

    int         mat_row, mat_col;
    int         grid_row, grid_col;
    int         source;
    int         coords[2];
    FLOAT*      temp;
    MPI_Status  status;

    if (grid->my_rank == 0) {
        temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_A);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    for (mat_col = 0; mat_col < Order(local_A); mat_col++)
                        printf("%20.15E ", Entry(local_A, mat_row, mat_col));
                } else {
                    MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0,
                             grid->comm, &status);
                    for (mat_col = 0; mat_col < Order(local_A); mat_col++)
                        printf("%20.15E ", temp[mat_col]);
                }
            }
            printf("\n");
        }
        free(temp);
    } else {
        for (mat_row = 0; mat_row < Order(local_A); mat_row++)
            MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI,
                     0, 0, grid->comm);
    }
}  /* Print_matrix_A */

/*********************************************************/
/* Receive and print matrix B (stored locally as its transpose):
 *     for each global row of the matrix,
 *         for each grid column,
 *             send n_bar floats to process 0 from each other process;
 *             receive a block of n_bar floats on process 0 and print it.
 */
void Print_matrix_B(
        char*            title    /* in  */,
        LOCAL_MATRIX_T*  local_B  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {

    int         mat_row, mat_col;
    int         grid_row, grid_col;
    int         source;
    int         coords[2];
    FLOAT*      temp;
    MPI_Status  status;

    if (grid->my_rank == 0) {
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_B);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        printf("%20.15E ", Entry(local_B, mat_col, mat_row));
                        // switch rows and columns in local_B, for column-major storage
                } else {
                    MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
                             grid->comm, &status);
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        printf("%20.15E ", temp[mat_col]);
                }
            }
            printf("\n");
        }
        free(temp);
    } else {
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
            for (mat_row = 0; mat_row < Order(local_B); mat_row++)
                *(temp + mat_row) = Entry(local_B, mat_row, mat_col);
                // switch rows and columns in local_B, for column-major storage
            MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
        }
        free(temp);
    }
}  /* Print_matrix_B */

/*********************************************************/
/* Receive and print matrix C:
 *     for each global row of the matrix,
 *         for each grid column,
 *             send n_bar floats to process 0 from each other process;
 *             receive a block of n_bar floats on process 0 and print it.
 */
void Print_matrix_C(
        char*            title    /* in  */,
        LOCAL_MATRIX_T*  local_C  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {
    int         mat_row, mat_col;
    int         grid_row, grid_col;
    int         source;
    int         coords[2];
    FLOAT*      temp;
    MPI_Status  status;

    if (grid->my_rank == 0) {
        temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_C);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    for (mat_col = 0; mat_col < Order(local_C); mat_col++)
                        printf("%20.15E ", Entry(local_C, mat_row, mat_col));
                } else {
                    MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
                             grid->comm, &status);
                    for (mat_col = 0; mat_col < Order(local_C); mat_col++)
                        printf("%20.15E ", temp[mat_col]);
                }
            }
            printf("\n");
        }
        free(temp);
    } else {
        for (mat_row = 0; mat_row < Order(local_C); mat_row++)
            MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI,
                     0, 0, grid->comm);
    }
}  /* Print_matrix_C */

/*********************************************************/
/* Receive matrix C and write it to a file:
 *     for each global row of the matrix,
 *         for each grid column,
 *             send n_bar floats to process 0 from each other process;
 *             receive a block of n_bar floats on process 0 and write it.
 */
void Write_matrix_C(
        char*            title    /* in  */,
        LOCAL_MATRIX_T*  local_C  /* out */,
        GRID_INFO_T*     grid     /* in  */,
        int              n        /* in  */) {

    FILE*       fp;
    int         mat_row, mat_col;
    int         grid_row, grid_col;
    int         source;
    int         coords[2];
    FLOAT*      temp;
    MPI_Status  status;

    if (grid->my_rank == 0) {
        fp = fopen("C.dat", "w+");
        temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_C);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    for (mat_col = 0; mat_col < Order(local_C); mat_col++)
                        fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col));
                } else {
                    MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
                             grid->comm, &status);
                    for (mat_col = 0; mat_col < Order(local_C); mat_col++)
                        fprintf(fp, "%20.15E ", temp[mat_col]);
                }
            }
            fprintf(fp, "\n");
        }
        free(temp);
        fclose(fp);
    } else {
        for (mat_row = 0; mat_row < Order(local_C); mat_row++)
            MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI,
                     0, 0, grid->comm);
    }
}  /* Write_matrix_C */

/*********************************************************/
/* Set the local matrix's elements to zero */
void Set_to_zero(
        LOCAL_MATRIX_T* local_A  /* out */) {
    int i, j;
    for (i = 0; i < Order(local_A); i++)
        for (j = 0; j < Order(local_A); j++)
            Entry(local_A,i,j) = 0.0E0;
}  /* Set_to_zero */

/*********************************************************/
void Build_matrix_type(
        LOCAL_MATRIX_T* local_A  /* in */) {

    MPI_Datatype  temp_mpi_t;
    int           block_lengths[2];
    MPI_Aint      displacements[2];
    MPI_Datatype  typelist[2];
    MPI_Aint      start_address;
    MPI_Aint      address;

    MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t);
    // Creates a contiguous datatype
    /* Synopsis
       int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype)
       Input Parameters
           count    replication count (nonnegative integer)
           oldtype  old datatype (handle)
    */

    block_lengths[0] = block_lengths[1] = 1;
    typelist[0] = MPI_INT;
    typelist[1] = temp_mpi_t;
    MPI_Get_address(local_A, &start_address);       // Gets the address of a location in the caller's memory
    MPI_Get_address(&(local_A->n_bar), &address);   // (MPI_Address was removed in MPI-3; MPI_Get_address replaces it)
    /* Synopsis
       int MPI_Get_address(const void *location, MPI_Aint *address)
       Input Parameters
           location  location in caller memory (choice)
       Output Parameters
           address   address of location (address integer)
    */
    displacements[0] = address - start_address;
    MPI_Get_address(local_A->entries, &address);
    displacements[1] = address - start_address;
    MPI_Type_create_struct(2, block_lengths, displacements, typelist,
                           &local_matrix_mpi_t);    // Creates a struct datatype
    /* Synopsis
       int MPI_Type_create_struct(int count, const int *array_of_blocklengths,
                                  const MPI_Aint *array_of_displacements,
                                  const MPI_Datatype *array_of_types,
                                  MPI_Datatype *newtype)
       Input Parameters
           count                   number of blocks (integer) -- also the number of entries in
                                   array_of_types, array_of_displacements and array_of_blocklengths
           array_of_blocklengths   number of elements in each block (array)
           array_of_displacements  byte displacement of each block (array)
           array_of_types          type of elements in each block (array of handles to datatype objects)
       Output Parameters
           newtype                 new datatype (handle)
    */
    MPI_Type_commit(&local_matrix_mpi_t);           // Commits the datatype
    /* Synopsis
       int MPI_Type_commit(MPI_Datatype *datatype)
       Input Parameters
           datatype  datatype (handle)
    */
}  /* Build_matrix_type */

/*********************************************************/
/* Local matrix multiplication function
 * with OpenMP thread acceleration
 */
void Local_matrix_multiply(
        LOCAL_MATRIX_T*  local_A  /* in  */,
        LOCAL_MATRIX_T*  local_B  /* in  */,
        LOCAL_MATRIX_T*  local_C  /* out */) {
    int i, j, k;
    // int my_rank;
    // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);     // Get my process id in the MPI communicator

    // Thread acceleration: split the outer loop across OpenMP threads
    #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS)
    for (i = 0; i < Order(local_A); i++) {
        // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n", my_rank, omp_get_thread_num());
        for (j = 0; j < Order(local_A); j++)
            for (k = 0; k < Order(local_B); k++)
                Entry(local_C,i,j) = Entry(local_C,i,j)
                    + Entry(local_A,i,k)*Entry(local_B,j,k);
                // contiguous memory access: local_B holds B^{T}, so this computes
                // A(i,k)*B^{T}(j,k) = A(i,k)*B(k,j) with unit-stride reads
        /* Entry(local_C,i,j) = Entry(local_C,i,j)
               + Entry(local_A,i,k)*Entry(local_B,k,j);   // non-contiguous access */
    }
}  /* Local_matrix_multiply */

/*********************************************************/
/* Receive and print local matrices A:
 *     process 0 prints its own local matrix local_A;
 *     the other processes send their local_A to process 0,
 *     and process 0 receives and prints each of them.
 */
void Print_local_matrices_A(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_A  /* in */,
        GRID_INFO_T*     grid     /* in */) {

    int         coords[2];
    int         i, j;
    int         source;
    MPI_Status  status;

    // printing is done by process 0 of the process mesh
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
               grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_A); i++) {
            for (j = 0; j < Order(local_A); j++)
                printf("%20.15E ", Entry(local_A,i,j));
            printf("\n");
        }
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                   source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,i,j));
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
}  /* Print_local_matrices_A */

/*********************************************************/
/* Receive and print local matrices B (stored locally as transposes):
 *     process 0 prints its own local matrix local_B;
 *     the other processes send their local_B to process 0,
 *     and process 0 receives and prints each of them.
 */
void Print_local_matrices_B(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_B  /* in */,
        GRID_INFO_T*     grid     /* in */) {

    int         coords[2];
    int         i, j;
    int         source;
    MPI_Status  status;

    // printing is done by process 0 of the process mesh
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
               grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_B); i++) {
            for (j = 0; j < Order(local_B); j++)
                printf("%20.15E ", Entry(local_B,j,i));
                // switch rows and columns in local_B, for column-major storage
            printf("\n");
        }
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                   source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,j,i));
                    // switch rows and columns in local_B, for column-major storage
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
}  /* Print_local_matrices_B */

/*********************************************************/
/* Receive and print local matrices C:
 *     process 0 prints its own local matrix local_C;
 *     the other processes send their local_C to process 0,
 *     and process 0 receives and prints each of them.
 */
void Print_local_matrices_C(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_C  /* in */,
        GRID_INFO_T*     grid     /* in */) {

    int         coords[2];
    int         i, j;
    int         source;
    MPI_Status  status;

    // printing is done by process 0 of the process mesh
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
               grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_C); i++) {
            for (j = 0; j < Order(local_C); j++)
                printf("%20.15E ", Entry(local_C,i,j));
            printf("\n");
        }
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                   source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,i,j));
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
}  /* Print_local_matrices_C */

/*********************************************************/
/* Receive local matrices A and write them to a file:
 *     process 0 writes its own local matrix local_A;
 *     the other processes send their local_A to process 0,
 *     and process 0 receives and writes each of them.
 */
void Write_local_matrices_A(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_A  /* in */,
        GRID_INFO_T*     grid     /* in */) {

    FILE*       fp;
    int         coords[2];
    int         i, j;
    int         source;
    MPI_Status  status;

    // writing is done by process 0 of the process mesh
    if (grid->my_rank == 0) {
        fp = fopen("local_A.dat", "w+");
        printf("%s\n", title);
        fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_A); i++) {
            for (j = 0; j < Order(local_A); j++)
                fprintf(fp, "%20.15E ", Entry(local_A,i,j));
"\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
ejercicio5.c
#include <stdio.h>
#include <omp.h>

int main() {
    int n = 9, i, b[n];

    for (i = 0; i < n; i++)
        b[i] = -1;

    #pragma omp parallel
    {
        int a;  /* private: each thread has its own copy */

        /* copyprivate(a) broadcasts the value read inside the single
           construct to every thread's private copy of a; without it the
           other threads' copies would remain uninitialized. */
        #pragma omp single copyprivate(a)
        {
            printf("\nEnter initialization value a: ");
            scanf("%d", &a);
            printf("\nSingle executed by thread %d\n", omp_get_thread_num());
        }

        #pragma omp for
        for (i = 0; i < n; i++)
            b[i] = a;
    }

    printf("After the parallel region:\n");
    for (i = 0; i < n; i++)
        printf("b[%d] = %d\t", i, b[i]);
    printf("\n");
    return 0;
}
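For contrast with the copyprivate broadcast above, the same initialization can also be done with a shared variable: single already ends in an implicit barrier, so every thread sees the value written inside it. A minimal sketch of that variant (same program, shared a instead of a per-thread private copy):

#include <stdio.h>
#include <omp.h>

int main() {
    int n = 9, i, b[9];
    int a;                          /* shared: declared outside the parallel region */

    #pragma omp parallel
    {
        #pragma omp single          /* one thread reads; implicit barrier at the end */
        {
            printf("\nEnter initialization value a: ");
            scanf("%d", &a);
        }
        /* after the barrier, every thread reads the same shared a */
        #pragma omp for
        for (i = 0; i < n; i++)
            b[i] = a;
    }
    for (i = 0; i < n; i++)
        printf("b[%d] = %d\t", i, b[i]);
    printf("\n");
    return 0;
}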
utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file utils.h
 * \brief Basic utility functions.
 */
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_

#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/imperative.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>

#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>

#include "../operator/mxnet_op.h"
#if MXNET_USE_ONEDNN == 1
#include "../operator/nn/dnnl/dnnl_base-inl.h"
#endif

#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif

namespace mxnet {
namespace common {

#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
inline size_t current_process_id() {
  return ::GetCurrentProcessId();
}
#else
inline size_t current_process_id() {
  return getpid();
}
#endif

/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0
 *        and end with value equal with size of indices.
 */
struct csr_indptr_check {
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,
                                  const IType* indptr,
                                  const nnvm::dim_t end,
                                  const nnvm::dim_t idx_size) {
    if (indptr[i + 1] < 0 || indptr[i + 1] < indptr[i] || (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};

/*!
 * \brief Indices should be non-negative, less than the number of columns
 *        and in ascending order per row.
 */
struct csr_idx_check {
  template <typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,
                                  const IType* idx,
                                  const RType* indptr,
                                  const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i + 1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i + 1] - 1 && idx[j] >= idx[j + 1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};

/*!
 * \brief Indices of RSPNDArray should be non-negative,
 *        less than the size of first dimension and in ascending order
 */
struct rsp_idx_check {
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,
                                  const IType* idx,
                                  const nnvm::dim_t end,
                                  const nnvm::dim_t nrows) {
    if ((i < end && idx[i + 1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};

template <typename xpu>
void CheckFormatWrapper(const RunContext& rctx,
                        const NDArray& input,
                        const TBlob& err_cpu,
                        const bool full_check);

/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template <typename xpu>
void CheckFormatCSRImpl(const RunContext& rctx,
                        const NDArray& input,
                        const TBlob& err_cpu,
                        const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape         = input.shape();
  const mxnet::TShape idx_shape     = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape  = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err       = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
          NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu   = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s,
                                                indptr_shape[0] - 1,
                                                val_xpu.dptr<DType>(),
                                                input.aux_data(csr::kIndPtr).dptr<RType>(),
                                                indptr_shape[0] - 1,
                                                idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s,
                                               indptr_shape[0] - 1,
                                               val_xpu.dptr<DType>(),
                                               input.aux_data(csr::kIdx).dptr<IType>(),
                                               input.aux_data(csr::kIndPtr).dptr<RType>(),
                                               shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}

/*!
 * \brief Check the validity of RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template <typename xpu>
void CheckFormatRSPImpl(const RunContext& rctx,
                        const NDArray& input,
                        const TBlob& err_cpu,
                        const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err       = kRSPShapeErr;
    });
    return;
  }
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu   = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());

        Kernel<rsp_idx_check, xpu>::Launch(s,
                                           idx_shape[0],
                                           val_xpu.dptr<DType>(),
                                           input.aux_data(rowsparse::kIdx).dptr<IType>(),
                                           idx_shape[0] - 1,
                                           input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}

template <typename xpu>
void CheckFormatImpl(const RunContext& rctx,
                     const NDArray& input,
                     const TBlob& err_cpu,
                     const bool full_check) {
  int stype = input.storage_type();
  if (stype == kCSRStorage) {
    CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kRowSparseStorage) {
    CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kDefaultStorage) {
    // no-op for default storage
  } else {
    LOG(FATAL) << "Unknown storage type " << stype;
  }
}

/*! \brief Pick rows specified by user input index array from a row sparse ndarray
 *         and save them in the output sparse ndarray.
 */
template <typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu>* s,
                                     const NDArray& input_nd,
                                     const TBlob& idx_data,
                                     const OpReqType req,
                                     NDArray* output_nd);

/* \brief Casts tensor storage type to the new type. */
template <typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);

/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
 *         false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) {
  if (!vstorage.empty()) {
    for (const auto& i : vstorage) {
      if (i != stype)
        return false;
    }
    return true;
  }
  return false;
}

/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 *         or `stype2`. Sets boolean if both found.
 *         false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool* has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (!vstorage.empty()) {
    uint8_t has = 0;
    for (const auto i : vstorage) {
      if (i == stype1) {
        has |= 1;
      } else if (i == stype2) {
        has |= 2;
      } else {
        return false;
      }
    }
    if (has_both) {
      *has_both = has == 3;
    }
    return true;
  }
  return false;
}

/*! \brief returns true if the storage types of arrays in `ndarrays`
 *         are the same as target `stype`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (!ndarrays.empty()) {
    for (const auto& nd : ndarrays) {
      if (nd.storage_type() != stype) {
        return false;
      }
    }
    return true;
  }
  return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
 *         are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool* has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (!ndarrays.empty()) {
    uint8_t has = 0;
    for (const auto& nd : ndarrays) {
      const NDArrayStorageType stype = nd.storage_type();
      if (stype == stype1) {
        has |= 1;
      } else if (stype == stype2) {
        has |= 2;
      } else {
        return false;
      }
    }
    if (has_both) {
      *has_both = has == 3;
    }
    return true;
  }
  return false;
}

/*! \brief returns true if storage type of any array in `ndarrays`
 *         is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (!ndarrays.empty()) {
    for (const auto& nd : ndarrays) {
      if (nd.storage_type() == stype) {
        return true;
      }
    }
  }
  return false;
}

/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 *         is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) {
  if (!ndstypes.empty()) {
    for (const auto& ndstype : ndstypes) {
      if (ndstype == stype) {
        return true;
      }
    }
  }
  return false;
}

/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  switch (x) {
    case DispatchMode::kFCompute:
      return "fcompute";
    case DispatchMode::kFComputeEx:
      return "fcompute_ex";
    case DispatchMode::kFComputeFallback:
      return "fcompute_fallback";
    case DispatchMode::kVariable:
      return "variable";
    case DispatchMode::kUndefined:
      return "undefined";
  }
  return "unknown";
}

/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:
      return "default";
    case kCSRStorage:
      return "csr";
    case kRowSparseStorage:
      return "row_sparse";
  }
  return "unknown";
}

/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  switch (dev_type) {
    case Context::kCPU:
      return "cpu";
    case Context::kGPU:
      return "gpu";
    case Context::kCPUPinned:
      return "cpu_pinned";
    case Context::kCPUShared:
      return "cpu_shared";
  }
  return "unknown";
}

inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  if (attrs.dict.find(attr_name) == attrs.dict.end()) {
    return default_val;
  }
  return attrs.dict.at(attr_name);
}

/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  os << "operator = " << attrs.op->name << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  for (auto kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::string result = "";
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
  return result;
}

/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}

/*! \brief log storage fallback event */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log)
    return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning =
      "\n WARNING:\n"
      "Execution of the operator above will fallback to the generic implementation "
#if MXNET_USE_ONEDNN == 1
      "(not utilizing kernels from oneDNN library) "
#endif
      "with default dense storage type. You are seeing this warning message because "
#if MXNET_USE_ONEDNN == 1
      "MXNET_ONEDNN_ENABLED flag is set to 0, in which case you can re-enable the default "
      "execution path by setting MXNET_ONEDNN_ENABLED back to 1, or "
#endif
      "the operator above is unable to process the given ndarrays with specified storage types, "
      "context and/or parameter, in which case temporary dense ndarrays are generated in order to "
      "execute the operator. The fallback does not affect the correctness of the programme. Using "
      "default storage type performance degradation might be observed. \nYou can set environment "
      "variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to 0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_ONEDNN == 1
  if (GetDNNLCacheSize() != -1)
    common::LogOnce(
        "MXNET_ONEDNN_CACHE_NUM is set."
        "Should only be set if "
        "your model has variable input shapes, "
        "as cache size may grow unbounded");
#endif
}

// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is a resource-efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}

// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is a resource-efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(num_match_color, GetNumThreadsPerGPU());
}

template <typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
#pragma omp parallel for reduction(+ : sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}

/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template <typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first + len, comp);
  } else {
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len / 2, grainsize, comp);
    ParallelSortHelper(first + len / 2, len - len / 2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first + len / 2, first + len, comp);
  }
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template <typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num   = std::distance(first, last);
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024 * 16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template <typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(
      first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std::unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std::unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std::unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std::unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std::unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructing an array of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;

template <typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");

  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}

/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value ? std::numeric_limits<T>::max()
                                    : size_t(2) << (std::numeric_limits<T>::digits - 1);
}

template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}

template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  return size_t(2) << 14;
}

MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1)
    ++k;
  return k;
}

MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1)
    ++k;
  return k;
}

/*!
 * \brief Return an NDArray of all zeros.
 */
inline NDArray InitZeros(const NDArrayStorageType stype,
                         const mxnet::TShape& shape,
                         const Context& ctx,
                         const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}

/*!
 * \brief Helper to add a NDArray of zeros to a std::vector.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype,
                             const mxnet::TShape& shape,
                             const Context& ctx,
                             const int dtype,
                             std::vector<NDArray>* vec) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}

/*!
 * \brief parallelize copy by OpenMP.
 */
template <typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
    std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
  }
}
/*!
 * \brief parallelize add by OpenMP
 */
template <typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
  static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  } else {
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  }
}

/*!
 * \brief If numpy compatibility is turned off (default), the shapes passed in
 *        by users follow the legacy shape definition:
 *        1. 0 ndim means the shape is completely unknown.
 *        2. 0 dim size means the dim size is unknown.
 *        We need to convert those shapes to use the numpy shape definition:
 *        1. 0 ndim means it's a scalar tensor.
 *        2. -1 ndim means the shape is unknown.
 *        3. 0 dim size means no elements in that dimension.
 *        4. -1 dim size means the dimension's size is unknown.
 *        so that operator's infer shape function can work in backend.
 * \param shape to be converted.
 * Note: It is possible that the shape to be converted is already
 *       numpy compatible. For example, when a subgraph operator's infer
 *       shape function is called from the infer shape pass of the whole
 *       graph, its input/output shapes have been converted to numpy
 *       compatible shapes.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {    // legacy shape ndim = 0 means unknown
    *shape = mxnet::TShape();  // unknown shape ndim = -1
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if ((*shape)[j] == 0) {  // legacy shape dim_size = 0 means unknown
        (*shape)[j] = -1;      // unknown dim size = -1
      }
    }
  }
}

inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (size_t i = 0; i < shapes->size(); ++i) {
    ConvertToNumpyShape(&(shapes->at(i)));
  }
}

/*!
 * \brief This function is used to convert shapes returned by
 *        the infer shape functions/pass to the legacy shape definition.
*/ inline void ConvertToLegacyShape(mxnet::TShape* shape) { if (!mxnet::ndim_is_known(*shape)) { *shape = mxnet::TShape(0, -1); } else { for (int j = 0; j < shape->ndim(); ++j) { if (!mxnet::dim_size_is_known(*shape, j)) { (*shape)[j] = 0; } } } } inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) { for (size_t i = 0; i < shapes->size(); ++i) { ConvertToLegacyShape(&(shapes->at(i))); } } void ExecuteMonInputCallback( const nnvm::IndexedGraph& idx, const std::vector<NDArray*>& state_arrays, size_t nid, const std::function<void(const char*, const char*, void*)>& monitor_callback); void ExecuteMonOutputCallback( const nnvm::IndexedGraph& idx, const std::vector<NDArray*>& state_arrays, size_t nid, const std::function<void(const char*, const char*, void*)>& monitor_callback); inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) { // convert negative axes to positive values const int ndim = src.ndim(); mxnet::TShape axes = src; for (int i = 0; i < ndim; ++i) { if (axes[i] < 0) { axes[i] += ndim; } CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]=" << axes[i] << " exceeds the range [" << 0 << ", " << ndim << ")"; } return axes; } inline bool is_float(const int dtype) { return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16; } inline bool is_int(const int dtype) { return dtype == mshadow::kUint8 || dtype == mshadow::kInt8 || dtype == mshadow::kInt32 || dtype == mshadow::kInt64; } inline int get_more_precise_type(const int type1, const int type2) { if (type1 == type2) return type1; if (is_float(type1) && is_float(type2)) { if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) { return mshadow::kFloat64; } if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) { return mshadow::kFloat32; } return mshadow::kFloat16; } else if (is_float(type1) || is_float(type2)) { return is_float(type1) ? type1 : type2; } if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) { return mshadow::kInt64; } if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) { return mshadow::kInt32; } CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) || (type1 == mshadow::kInt8 && type2 == mshadow::kUint8))) << "one operand is UInt8 and the other is Int8; this should not be reached"; if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) { return mshadow::kUint8; } return mshadow::kInt8; } inline int np_binary_out_infer_type(const int type1, const int type2) { if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) || (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) { return mshadow::kInt32; } return get_more_precise_type(type1, type2); } inline const std::string NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) { // obtain the profiler scope name, if assigned previously std::string profiler_scope = MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR; const std::unordered_map<std::string, std::string>& node_attrs_dict = attrs.dict; const std::unordered_map<std::string, std::string>::const_iterator profiler_scope_iter = node_attrs_dict.find("__profiler_scope__"); if (profiler_scope_iter != node_attrs_dict.end()) { profiler_scope = profiler_scope_iter->second; } return profiler_scope; } inline int GetDefaultDtype() { return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32; } inline int GetDefaultDtype(int dtype) { if (dtype != -1) return dtype; return Imperative::Get()->is_np_default_dtype() ?
mshadow::kFloat64 : mshadow::kFloat32; } struct MShadowTypeInfo { std::string name; int size; int acc_size; // Take the name by value and move it; moving from a const parameter would silently copy. MShadowTypeInfo(std::string name, const int size, const int acc_size) : name(std::move(name)), size(size), acc_size(acc_size) {} MShadowTypeInfo(std::string name, const int size) : MShadowTypeInfo(std::move(name), size, size) {} }; MShadowTypeInfo mshadow_type_info(const int type_flag); inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) { #if _MSC_VER *ptr = _aligned_malloc(size, alignment); if (*ptr == nullptr) return false; #else int res = posix_memalign(ptr, alignment, size); if (res != 0) return false; #endif return true; } inline void AlignedMemFree(void* ptr) { #if _MSC_VER _aligned_free(ptr); #else free(ptr); #endif } inline index_t div_round(const index_t a, const index_t b) { return (a + b - 1) / b; } inline bool IsPower2(size_t N) { return ((N & (N - 1)) == 0) && N != 0; } inline size_t RoundToPower2(size_t N) { size_t ret = 1; size_t copyN = N; while (N >= 2) { ret *= 2; N /= 2; } if (ret < copyN) { ret *= 2; } return ret; } } // namespace common } // namespace mxnet #endif // MXNET_COMMON_UTILS_H_
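// ---------------------------------------------------------------------------
// Illustration (added; not part of the original header): a minimal standalone
// sketch mirroring the semantics of ilog2ul / IsPower2 / RoundToPower2 above,
// so they can be sanity-checked outside the MXNet tree. The *_ref names are
// hypothetical; the originals additionally carry MSHADOW_XINLINE and live in
// namespace mxnet::common.
#include <assert.h>
#include <stddef.h>

// Bit width of a: floor(log2(a)) + 1 for a >= 1, matching ilog2ul above.
static int ilog2_ref(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

// Smallest power of two >= n, matching RoundToPower2 above for n >= 1.
static size_t round_to_power2_ref(size_t n) {
  size_t ret = 1;
  while (ret < n) ret *= 2;
  return ret;
}

int main(void) {
  assert(ilog2_ref(1) == 1 && ilog2_ref(8) == 4);  // 8 occupies 4 bits
  assert(round_to_power2_ref(5) == 8);             // non-powers round up
  assert(round_to_power2_ref(8) == 8);             // powers map to themselves
  assert((8 & (8 - 1)) == 0 && 8 != 0);            // the IsPower2 test
  return 0;
}
// ---------------------------------------------------------------------------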
relu_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haitao@openailab.com */ #include "relu_kernel_arm.h" #include <math.h> #include <arm_neon.h> #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? (a) : (b)) static inline int relu_kernel(const int i, const int id, const void* data, const float* input, float* output, const float slope) { float32x4_t _zero = vdupq_n_f32(0.f); int step = ((int*)data)[0]; const float* cur_input = input + id * step; float* cur_output = output + id * step; if (slope == 0) { for (int l = 0; l < (step & -4); l += 4) { float32x4_t _p = vld1q_f32(cur_input); _p = vmaxq_f32(_p, _zero); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { *cur_output++ = MAX(*cur_input++, 0.f); } } else { float32x4_t _slope = vdupq_n_f32(slope); for (int l = 0; l < (step & -4); l += 4) { float32x4_t _p = vld1q_f32(cur_input); // ri = ai <= bi ? 1...1:0...0 uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _ps = vmulq_f32(_p, _slope); // bitwise select _p = vbslq_f32(_lemask, _ps, _p); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { *cur_output++ = MAX(cur_input[0], 0.f) + slope * MIN(cur_input[0], 0.f); cur_input++; } } return 0; } int relu_arm_run(struct tensor* output_tensor, struct tensor* input_tensor, struct relu_param* relu_param, int num_thread) { float* data = (float*)input_tensor->data; float* out_data = (float*)output_tensor->data; float negativeslope = relu_param->negative_slope; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; // #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; relu_kernel(0, 0, &chan_size, data + offset, out_data + offset, negativeslope); } return 0; }
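/* -------------------------------------------------------------------------
 * Illustration (added; not part of the original file): a scalar reference
 * model of the NEON kernel above, handy for cross-checking the vectorized
 * path. For slope == 0 it reduces to max(x, 0); otherwise it is the usual
 * leaky ReLU x > 0 ? x : slope * x, which is exactly what the
 * vcleq_f32/vbslq_f32 bit-select computes four lanes at a time. The
 * function name is hypothetical.
 */
#include <stddef.h>

static void relu_reference(const float* input, float* output, size_t count,
                           float slope)
{
    for (size_t i = 0; i < count; i++)
    {
        float x = input[i];
        /* Equivalent to MAX(x, 0) + slope * MIN(x, 0) in the tail loop. */
        output[i] = x > 0.f ? x : slope * x;
    }
}
/* ------------------------------------------------------------------------- */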
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class 
DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. 
bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. 
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encountered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. 
may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. 
QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. 
enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not).
Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
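///
/// A minimal usage sketch (QualType T and SourceLocation Loc assumed to be
/// in hand; getTrivialTypeSourceInfo is just one way to obtain a TSI):
/// \code
///   TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(T, Loc);
///   ParsedType PT = CreateParsedType(T, TSI);
/// \endcode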
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
  { assert(DiagID != 0 && "no diagnostic for type diagnoser"); }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};

/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc,
                       const IdentifierInfo *AttrName);

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition.
If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. 
NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification ContextIndependentExpr(ExprResult E) { NameClassification Result(NC_ContextIndependentExpr); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_ContextIndependentExpr); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
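///
/// Illustrative call from parser code (a sketch; 'Actions' is the parser's
/// Sema reference, the other locals are hypothetical):
/// \code
///   Sema::NameClassification Classification =
///       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next);
///   if (Classification.getKind() == Sema::NC_Type)
///     ParsedType T = Classification.getType();
/// \endcode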
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. 
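///
/// Illustrative shadowing pattern of the sort tracked here (a sketch,
/// hypothetical names), where the modification touches the shadowing copy
/// rather than the enclosing variable:
/// \code
///   int Count = 0;
///   auto Fn = [Count]() mutable {
///     Count = 1;  // modifies the capture that shadows the enclosing Count
///   };
/// \endcode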
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);

NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
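  ///
  /// For example (a sketch; call site hypothetical): pass Diagnose when
  /// acting on a function explicitly marked 'constexpr', and CheckValid when
  /// merely probing whether a function could be implicitly constexpr:
  /// \code
  ///   bool Valid = S.CheckConstexprFunctionDefinition(
  ///       FD, Sema::CheckConstexprKind::CheckValid);
  /// \endcode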
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
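///
/// Illustrative call (a sketch; VD is a hypothetical VarDecl), checking both
/// default-initialization and copying for a variable of automatic storage
/// duration:
/// \code
///   S.checkNonTrivialCUnion(VD->getType(), VD->getLocation(),
///                           Sema::NTCUC_AutoVar,
///                           Sema::NTCUK_Init | Sema::NTCUK_Copy);
/// \endcode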
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
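///
/// Illustrative call once a definition is complete (a sketch; FD is a
/// hypothetical FunctionDecl):
/// \code
///   S.DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
///                                            FD->getReturnType(), FD);
/// \endcode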
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                            QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
///        could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
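///
/// A typical call shape (a sketch; ND and UseLoc are hypothetical):
/// \code
///   S.diagnoseMissingImport(UseLoc, ND, Sema::MissingImportKind::Definition,
///                           /*Recover=*/true);
/// \endcode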
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
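///
/// For instance (a sketch; FD is a hypothetical FunctionDecl), a defaulted
/// copy constructor reports itself as a special member rather than a
/// comparison:
/// \code
///   Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD);
///   if (DFK.isSpecialMember()) {
///     CXXSpecialMember CSM = DFK.asSpecialMember();
///     (void)CSM;
///   }
/// \endcode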
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,

  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,

  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return the merged attribute if a new one was
/// added, or null otherwise.
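///
/// Typical call pattern when processing a parsed attribute (a sketch; D and
/// CI are hypothetical):
/// \code
///   if (VisibilityAttr *NewAttr =
///           S.mergeVisibilityAttr(D, CI, VisibilityAttr::Default))
///     D->addAttr(NewAttr);
/// \endcode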
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. 
Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. 
CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool
AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate. void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression Expr. void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
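// For reference, the language rule behind the address-resolution entry
// points declared below (standard C++; a minimal sketch, not from this
// header):
//   int f(int);
//   double f(double);
//   int (*p)(int) = &f; // the target type selects f(int);
//                       // ResolveAddressOfOverloadedFunction performs
//                       // this selection during semantic analysis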
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - 
Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupNameKind enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. 
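// A standard-C++ illustration of the literal-operator forms that
// LookupLiteralOperator (above) distinguishes (a sketch, not from the
// original header):
//   constexpr double operator""_kg(long double v) { return v; } // LOLR_Cooked
//   unsigned operator""_bin(const char *spelling);               // LOLR_Raw
//   template <char...> int operator""_bits();                    // LOLR_Template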
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of function emission based on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter /// and its property has a backing ivar, returns this ivar; otherwise, returns /// NULL. It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether two methods' types match and /// returns true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool.
See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter CheckTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, /// it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
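// A minimal usage sketch for CompoundScopeRAII above (assumes a Sema
// &SemaRef is in scope; illustrative only):
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef); // ActOnStartOfCompoundStmt
//     // ... act on the statements of the block ...
//   } // the destructor runs ActOnFinishOfCompoundStmt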
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
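// For illustration, the kinds of code these two warnings cover (a sketch
// using Clang's nullability qualifiers; 'sink' and 'test' are placeholder
// names):
//   void sink(int *_Nonnull p);
//   void test(int *_Nullable q) {
//     sink(q);    // _Nullable -> _Nonnull conversion is diagnosed
//     int *r = 0; // an implicit 0-to-nullptr conversion is diagnosed
//                 // where the zero-as-null-pointer warning is enabled
//   }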
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
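// A standard-C++ reminder of the odr-use distinction involved here
// (illustrative sketch):
//   struct S { static const int x = 1; };
//   int f() { return S::x; }         // not an odr-use: the constant is
//                                    // immediately lvalue-to-rvalue converted
//   const int *g() { return &S::x; } // odr-use: a definition of S::x
//                                    // is required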
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
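// For illustration, the kind of recovery meant above (a sketch; 'answer'
// is a placeholder name):
//   int answer();
//   int x = answer; // type mismatch on the function reference; recovery
//                   // offers the zero-argument call 'answer()' instead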
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
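// Illustrative scenario for the retry described above: given a class with an
// overloaded operator->,
//
//   struct Handle { Widget *operator->(); };
//   Handle h;
//   h.draw();      // error: 'Handle' has no member 'draw'
//
// member access can be reinvoked as 'h->draw()' to see whether '->' was the
// intended operator, so a fix-it can be suggested.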
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression.
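// GNU statement-expression example (illustrative):
//
//   int x = ({ int t = f(); t * 2; });   // value of '({...})' is 't * 2'
//
// ActOnStartStmtExpr / ActOnStmtExpr bracket the construct, and
// ActOnStmtExprResult (below) adjusts the final expression so its value can
// serve as the result of the whole '({...})'.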
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
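// __builtin_convertvector example (illustrative):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef int   int4   __attribute__((ext_vector_type(4)));
//   int4 f(float4 v) { return __builtin_convertvector(v, int4); }
//
// The destination type is handed to ActOnConvertVectorExpr as ParsedDestTy;
// both vector types must have the same number of elements.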
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs. QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2.
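// E.g. (illustrative): 'V(std::initializer_list<int>)' is an
// initializer-list constructor, and so is
// 'V(std::initializer_list<int>, int = 0)', since [dcl.init.list]p2 allows
// trailing parameters with default arguments.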
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have.
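// Computation sketch (illustrative): the ComputeDefaulted* helpers below
// fold the exception specifications of the subobject members they would
// invoke into an ImplicitExceptionSpecification, roughly:
//
//   ImplicitExceptionSpecification ExceptSpec(*this);
//   for (const auto &B : ClassDecl->bases())
//     if (CXXConstructorDecl *Ctor = LookupDefaultCtor(B))  // hypothetical helper
//       ExceptSpec.CalledDecl(Loc, Ctor);
//   // ...likewise for virtual bases and non-static data members...
//   return ExceptSpec;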
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor.
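// Protocol sketch (illustrative): each Declare*/Define* pair below works the
// same way; the member is declared eagerly (marked implicit) and defined
// lazily, on first odr-use:
//
//   CXXConstructorDecl *CD = S.DeclareImplicitDefaultConstructor(ClassDecl);
//   // ... later, when the constructor is actually used:
//   S.DefineImplicitDefaultConstructor(UseLoc, CD);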
CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. 
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ).
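// Fold-expression forms (illustrative): '(args + ...)' is a unary right
// fold, '(... + args)' a unary left fold, and '(0 + ... + args)' a binary
// left fold; ActOnCXXFoldExpr (below) receives the pack side as LHS or RHS
// accordingly, with a null pointer for the missing operand of unary folds.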
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true on failure, false on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
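// Usage sketch for CXXThisScopeRAII above (illustrative; 'RD' is the class
// currently being analyzed):
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(S, RD, /*CXXThisTypeQuals=*/Qualifiers());
//     ExprResult This = S.ActOnCXXThis(Loc);  // 'this' now resolves against RD
//   }   // previous 'this' context restored here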
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
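// For example (illustrative): a full-expression such as
// 'std::string("x").size()' creates a temporary with a non-trivial
// destructor, so the checked expression is wrapped:
//
//   Expr *Full = S.MaybeCreateExprWithCleanups(SubExpr);
//   // 'Full' is an ExprWithCleanups if cleanups were pending,
//   // otherwise it is 'SubExpr' unchanged.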
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, no error message is emitted. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
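// Round-trip sketch (illustrative): the parser can stash a parsed scope
// specifier in a token annotation and rebuild it later:
//
//   void *Ann = S.SaveNestedNameSpecifierAnnotation(SS);
//   // ... token is buffered / replayed ...
//   CXXScopeSpec Restored;
//   S.RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, Restored);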
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the DeclContext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately.
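// Init-capture flow sketch (illustrative; 'InitStyle' and 'Init' come from
// the parsed '[x = expr]' introducer):
//
//   QualType T = S.buildLambdaInitCaptureInitialization(
//       Loc, /*ByRef=*/false, EllipsisLoc, /*NumExpansions=*/None, Id,
//       /*DirectInit=*/false, Init);
//   VarDecl *VD = S.createLambdaInitCaptureVarDecl(Loc, T, EllipsisLoc, Id,
//                                                  InitStyle, Init);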
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                  CXXConversionDecl *Conv);

ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                         SourceLocation ConvLocation,
                                         CXXConversionDecl *Conv, Expr *Src);

/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);

private:
/// \brief Caches pairs of template-like decls whose associated constraints
/// were checked for subsumption and whether or not the first's constraints
/// did in fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;

public:
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                            NamedDecl *D2, ArrayRef<const Expr *> AC2,
                            bool &Result);

/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(TemplateDecl *Template,
                                 ArrayRef<const Expr *> ConstraintExprs,
                                 ArrayRef<TemplateArgument> TemplateArgs,
                                 SourceRange TemplateIDRange,
                                 ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(ClassTemplatePartialSpecializationDecl *TD,
                                 ArrayRef<const Expr *> ConstraintExprs,
                                 ArrayRef<TemplateArgument> TemplateArgs,
                                 SourceRange TemplateIDRange,
                                 ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(VarTemplatePartialSpecializationDecl *TD,
                                 ArrayRef<const Expr *> ConstraintExprs,
                                 ArrayRef<TemplateArgument> TemplateArgs,
                                 SourceRange TemplateIDRange,
                                 ConstraintSatisfaction &Satisfaction);

/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
                                 ConstraintSatisfaction &Satisfaction);

/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
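///
/// For example (an illustrative sketch, not from the original source;
/// 'Small' and 'S' are hypothetical names):
/// \code
/// template<typename T> concept Small = sizeof(T) <= 8;
/// template<Small T> struct S;      // first declaration
/// template<Small T> struct S { };  // OK: associated constraints match
/// \endcode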
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
                                       TemplateParameterList *New);

/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
                                           ArrayRef<TemplateArgument> TemplateArgs,
                                           SourceRange TemplateIDRange);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction& Satisfaction);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
                                            StringRef Diagnostic);

void DiagnoseRedeclarationConstraintMismatch(const TemplateParameterList *Old,
                                             const TemplateParameterList *New);

// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                  ArrayRef<Expr *> Strings);

ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);

ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
                                bool Value);

ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
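///
/// For illustration (a hedged Objective-C sketch, not from the original
/// source):
/// \code
/// NSNumber *n = @(6 * 7);    // boxed numeric expression
/// NSString *s = @("boxed");  // boxed "const char *" expression
/// \endcode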
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
                    ArrayRef<CXXCtorInitializer *> Initializers = None);

void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                            CXXRecordDecl *Record);

/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;

/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();

/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                    bool DefinitionRequired = false);

/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                           const CXXRecordDecl *RD);

/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
                                  bool ConstexprOnly = false);

/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();

void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                          ArrayRef<CXXCtorInitializer*> MemInits,
                          bool AnyErrors);

/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

void referenceDLLExportedClassMethods();

void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
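///
/// A minimal sketch (illustrative only; 'Handle' is a hypothetical name):
/// \code
/// struct [[clang::trivial_abi]] Handle {
///   Handle(const Handle &);
///   ~Handle();
///   int *Ptr;  // with the attribute, Handle can be passed in registers
/// };
/// \endcode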
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool 
IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. 
/// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
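///
/// For example (an illustrative sketch; the exact context chosen for each
/// declaration is an assumption here), the parameter lists below are
/// checked in different contexts from the enumeration that follows:
/// \code
/// template<typename T> struct X {          // a class template
///   template<typename U> void f();         // a member of a class template
///   template<typename U> friend struct Y;  // a friend class template
/// };
/// \endcode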
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(Decl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
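///
/// For instance (an illustrative sketch, with hypothetical names):
/// \code
/// template<typename T> void f() {
///   class T::template apply<int> *p = nullptr;  // elaborated-type-specifier
/// }
/// \endcode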
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, SourceLocation ConceptNameLoc, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, 
SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr);

DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);

TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                        SourceLocation TemplateLoc,
                                        SourceLocation RAngleLoc,
                                        Decl *Param,
                                        SmallVectorImpl<TemplateArgument> &Converted,
                                        bool &HasDefaultArg);

/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                           NamedDecl *Template, SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc, unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
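///
/// As an illustrative example (hypothetical names), checking the argument
/// list for 'P<3>' converts '3' to 'unsigned' and appends the defaulted
/// argument 'int':
/// \code
/// template<unsigned N, typename T = int> struct P;
/// P<3> p;  // checked/converted argument list is {3u, int}
/// \endcode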
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). 
/// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. 
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
                                     UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                     UnexpandedParameterPackContext UPPC);

/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                     UnexpandedParameterPackContext UPPC);

/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                     TemplateName Template,
                                     UnexpandedParameterPackContext UPPC);

/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                     UnexpandedParameterPackContext UPPC);

/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
                                     SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
/// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). 
/// The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// The deduced arguments did not satisfy the constraints associated
  /// with the template.
  TDK_ConstraintsNotSatisfied,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
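///
/// For example (an illustrative sketch), both call arguments below are
/// original call arguments for deducing 'T':
/// \code
/// template<typename T> void f(T, T *);
/// int n = 0;
/// f(n, &n);  // T deduced as 'int' from both arguments
/// \endcode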
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
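///
/// A minimal sketch of the language feature (hypothetical names):
/// \code
/// template<typename T> struct Box { Box(T); };
/// Box b(42);  // implicit deduction guide deduces Box<int>
/// \endcode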
/// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified.
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// We are checking the constraints associated with a constrained entity or /// the constraint expression of a concept. This includes the checks that /// atomic constraints have the type 'bool' and that they can be constant /// evaluated. ConstraintsCheck, /// We are substituting template arguments into a constraint expression. ConstraintSubstitution, /// We are normalizing a constraint expression. ConstraintNormalization, /// We are substituting into the parameter mapping of an atomic constraint /// during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// Added for template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined.
CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0;
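// NOTE: Illustrative sketch, not part of the original header. The context
// stack above can be walked (innermost first) to report what is currently
// being synthesized; `S` is a Sema instance.
//
//   for (const Sema::CodeSynthesisContext &Ctx :
//        llvm::reverse(S.CodeSynthesisContexts))
//     if (Ctx.isInstantiationRecord())
//       ; // Ctx.Entity is being instantiated at Ctx.PointOfInstantiation.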
/// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callback is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates to true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// number of recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); }
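// NOTE: Illustrative sketch, not part of the original header. The usual RAII
// pattern for the InstantiatingTemplate object declared above, written from
// inside a Sema member function with `PointOfInstantiation` and `Entity` in
// scope.
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true;  // Depth limit exceeded; the error is already diagnosed.
//   if (Inst.isAlreadyInstantiating())
//     return false; // Re-entered the same specialization; nothing to do.
//   // ... perform the instantiation; the context is popped by the
//   // InstantiatingTemplate destructor.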
/// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations;
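// NOTE: Illustrative sketch, not part of the original header. Typical use of
// the SFINAETrap declared above to check a construct without emitting
// diagnostics; `S` is a Sema instance and BuildCandidate() is a hypothetical
// stand-in for whatever check the caller wants to perform.
//
//   Sema::SFINAETrap Trap(S);
//   ExprResult Res = BuildCandidate(); // hypothetical call
//   if (Trap.hasErrorOccurred())
//     ; // Treat the candidate as non-viable instead of diagnosing.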
/// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); }
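// NOTE: Illustrative sketch, not part of the original header. The eager
// instantiation scopes above follow a save/perform/restore pattern; inside a
// Sema member function that instantiates a definition one typically writes
// (with `Recursive` a bool supplied by the caller):
//
//   GlobalEagerInstantiationScope GlobalInstantiations(*this, Recursive);
//   LocalEagerInstantiationScope LocalInstantiations(*this);
//   // ... instantiate the definition, queueing implicit instantiations ...
//   LocalInstantiations.perform();
//   GlobalInstantiations.perform(); // No-op unless Recursive was true.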
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void 
*InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned 
NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyPtrTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier.
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown };
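// NOTE: Illustrative sketch, not part of the original header. A caller of
// getObjCMessageKind (declared above) dispatches on the returned kind;
// `S`, `CurScope`, `Name`, `NameLoc`, `IsSuper`, and `HasTrailingDot` are
// assumed to be in scope.
//
//   ParsedType ReceiverType;
//   switch (S.getObjCMessageKind(CurScope, Name, NameLoc, IsSuper,
//                                HasTrailingDot, ReceiverType)) {
//   case Sema::ObjCSuperMessage:    /* message to 'super' */        break;
//   case Sema::ObjCInstanceMessage: /* receiver is an expression */ break;
//   case Sema::ObjCClassMessage:    /* identifier names a class */  break;
//   }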
/// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility...
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD is associated with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT is associated with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name. template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD);
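// NOTE: Illustrative sketch, not part of the original header. A plausible use
// of the OpenCL extension maps above, gating a type on an extension
// (QualType `T` in scope; the extension name is only an example):
//
//   S.setCurrentOpenCLExtension("cl_khr_fp64");
//   S.setOpenCLExtensionForType(T, "cl_khr_fp64");
//   // Later uses of T are diagnosed unless cl_khr_fp64 is enabled.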
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. using OMPCtxStringType = SmallString<8>; using OMPCtxSelectorData = OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>; /// Checks if the variant/multiversion functions are compatible. 
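// For orientation, a sketch of the source construct that the declare variant
// machinery above and below analyzes (illustrative; 'base_saxpy' and
// 'avx512_saxpy' are hypothetical):
//
//   double avx512_saxpy(double a, double x, double y);
//
//   #pragma omp declare variant(avx512_saxpy) match(device = {isa(avx512f)})
//   double base_saxpy(double a, double x, double y);
//
// When the context selector in the match clause applies, calls to base_saxpy
// dispatch to avx512_saxpy; markOpenMPDeclareVariantFuncsReferenced keeps such
// variants alive for code generation.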
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
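// As a concrete illustration of the construct handled here:
//
//   int counter;
//   #pragma omp threadprivate(counter)
//
// Every thread then operates on its own copy of 'counter'.
// ActOnOpenMPIdExpression validates that the id-expression names an eligible
// global/static variable before the declaration below is built.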
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region, i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region, i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region.
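// Illustrative sketch of a declare target region bracketed by the two hooks
// above ('device_tbl' and 'scale' are hypothetical names):
//
//   #pragma omp declare target
//   int device_tbl[64];
//   int scale(int x) { return 2 * x; }
//   #pragma omp end declare target
//
// While the region is open, DeclareTargetNestingLevel is non-zero, so
// isInOpenMPDeclareTargetContext() below returns true.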
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. 
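// For example (illustrative), a well-formed directive that reaches the hook
// below once its associated loop has been parsed:
//
//   void saxpy(int n, float a, const float *x, float *y) {
//     #pragma omp parallel for
//     for (int i = 0; i < n; ++i)
//       y[i] = a * x[i] + y[i];
//   }
//
// The clauses arrive in 'Clauses', the loop in 'AStmt', and the loop control
// variable 'i' is implicitly private.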
StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
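// E.g. (illustrative; 'use_dev', 'buf', and 'work' are hypothetical):
//
//   #pragma omp target parallel if(use_dev) num_threads(8) map(tofrom: buf[0:n])
//   work(buf, n);
//
// The 'target' part offloads the region and the 'parallel' part spawns a team
// on the device; bulk data movement can instead be staged with the
// 'target enter data' / 'target exit data' directives above.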
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
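// A fully combined construct such as this one (illustrative) exercises the
// teams/distribute/parallel hooks in this group:
//
//   #pragma omp target teams distribute parallel for map(tofrom: c[0:n])
//   for (int i = 0; i < n; ++i)
//     c[i] += b[i];
//
// Each constituent directive contributes its own captured region, which is
// why getOpenMPCaptureLevels() can report more than one level for combined
// directive kinds.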
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \returns None if the function/variant pair is not compatible with the /// pragma, otherwise the pair of the original function and the variant ref /// expression. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction( DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p FD. /// \param Data Set of context-specific data for the specified context /// selector. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, SourceRange SR, ArrayRef<OMPCtxSelectorData> Data); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause.
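// For instance (illustrative; 'depth', 'MAX_DEPTH', and 'traverse' are
// hypothetical), both clauses below are parsed through the single-expression
// clause hooks in this group:
//
//   #pragma omp task if(n > 16) final(depth >= MAX_DEPTH)
//   traverse(node);
//
// 'if' gates whether the task may execute deferred; once 'final' holds, all
// descendant tasks become included tasks and run immediately.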
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. 
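// The atomic-related clauses above typically appear as in this sketch
// (illustrative; 'x' and 'v' are hypothetical, and 'seq_cst' here assumes a
// feature level that permits it on this form):
//
//   #pragma omp atomic update
//   x += 1;
//
//   #pragma omp atomic capture seq_cst
//   { v = x; x += 1; }
//
// read/write/update/capture select the atomic form; seq_cst strengthens the
// memory ordering.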
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation DepLinMapLastLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause.
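// Illustrative use of the reduction-style clauses handled here ('f' and 'i'
// are hypothetical):
//
//   int sum = 0;
//   #pragma omp taskgroup task_reduction(+: sum)
//   {
//     #pragma omp task in_reduction(+: sum)
//     sum += f(i);
//   }
//
// The ReductionIdScopeSpec/ReductionId parameters also accept user-defined
// reductions introduced with '#pragma omp declare reduction'.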
OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. 
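// Sketch of the mapping clauses in this group (illustrative; 'a', 'r', and
// 'reduce_on_device' are hypothetical):
//
//   #pragma omp target map(to: a[0:n]) map(from: r) defaultmap(tofrom: scalar)
//   r = reduce_on_device(a, n);
//
// Map-type modifiers and optional user-defined mappers reach the hook through
// MapTypeModifiers and MapperId.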
OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6).
Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension.
IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16.
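// A few illustrative C-level cases and the AssignConvertType values one would
// expect them to map to (an editor's sketch, not an exhaustive or normative
// mapping):
//
//   int *p = 3;                // IntToPointer (accepted as an extension)
//   int n = p;                 // PointerToInt (accepted as an extension)
//   int *ip = 0;
//   unsigned *up = ip;         // IncompatiblePointerSign
//   char **cpp = 0;
//   const char **ccp = cpp;    // IncompatibleNestedPointerQualifiers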
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
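// Illustrative outcomes of these checks for built-in binary operators (the
// variables are hypothetical):
//
//   int i = 5; double d = 0.5;
//   auto x = i + d;    // usual arithmetic conversions: 'i' converts to
//                      // double; CheckAdditionOperands yields 'double'
//   int *p = nullptr;
//   auto y = p + i;    // pointer arithmetic; the result type is 'int *'
//
// On failure each Check* routine emits a diagnostic and returns a null
// QualType, as noted above.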
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
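// E.g. (illustrative), GCC-style vector types whose binary operators are
// checked by the routines below:
//
//   typedef float v4sf __attribute__((vector_size(16)));
//   v4sf add(v4sf a, v4sf b) { return a + b; }   // element-wise addition
//
// Comparisons of such vectors yield a signed integer vector of matching
// width (see GetSignedVectorType) rather than a scalar bool.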
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, Function = 0x2, DerivedToBase = 0x4, ObjC = 0x8, ObjCLifetime = 0x10, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector.
// returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);

ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, which is invalid if there were any
/// errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

private:
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                               SourceLocation>>
    DeviceCallGraph;

/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///   // Variable-length arrays are not allowed in CUDA device code.
///   if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///     return ExprError();
///   // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
///
/// Example usage:
///
///   // Variable-length arrays are not allowed in NVPTX device code.
///   if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
///     return ExprError();
///   // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
///   // Variable-length arrays are not allowed in NVPTX device code.
///   if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
///     return ExprError();
///   // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);

DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                      bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
  return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}

// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
  CFP_Never,      // Invalid caller/callee combination.
  CFP_WrongSide,  // Calls from host-device to host or device
                  // function that do not match current compilation
                  // mode.
  CFP_HostDevice, // Any calls to host/device functions.
  CFP_SameSide,   // Calls from host-device to host or device
                  // function matching current compilation mode.
  CFP_Native,     // host-to-host or device-to-device calls.
};

/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
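/// (For example, given the worst-to-best ordering above: a host-to-host or
/// device-to-device call scores CFP_Native, the most preferred; a call from
/// a __host__ __device__ function into a function matching the current
/// compilation mode scores CFP_SameSide; a mismatching one scores only
/// CFP_WrongSide.)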
/// \param Caller function which needs address of \p Callee.
///               nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p Var.
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, 
IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
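// (For example, a function declared with
// __attribute__((format(printf, 2, 3))) carries 1-based indices in the
// attribute; they are recorded 0-based here, roughly FormatIdx == 1 and
// FirstDataArg == 2, and shifted once more to account for the implicit
// 'this' parameter when IsCXXMember is true.)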
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
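/// (For example, a derived class declaring a field with the same name as a
/// field in one of its base classes; this backs Clang's -Wshadow-field
/// warning.)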
void CheckShadowInheritedFields(const SourceLocation &Loc,
                                DeclarationName FieldName,
                                const CXXRecordDecl *RD,
                                bool DeclIsField = true);

/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);

/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view it as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
int ParsingClassDepth = 0;

class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;

  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
  }
};

/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}

  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}

  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};

/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;

/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                   CharUnits Alignment);

public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();

/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
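// A minimal usage sketch for the EnterExpressionEvaluationContext RAII
// helper declared above (hypothetical caller code, not part of this header;
// the function name and `SemaRef` are illustrative assumptions):
//
//   void checkUnevaluatedOperand(clang::Sema &SemaRef, clang::Expr *E) {
//     // Push an unevaluated context for this scope, e.g. around a sizeof
//     // operand; the destructor pops the context automatically.
//     clang::EnterExpressionEvaluationContext Unevaluated(
//         SemaRef, clang::Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... analyze E without odr-use or evaluation side effects ...
//   } // context popped here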
GB_unop__identity_fc32_bool.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fc32_bool) // op(A') function: GB (_unop_tran__identity_fc32_bool) // C type: GxB_FC32_t // A type: bool // cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ bool aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fc32_bool) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const bool *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; bool aij = Ax [p] ; GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fc32_bool) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
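// A minimal usage sketch of how this generated kernel is typically reached
// through the public GraphBLAS API (assuming the GxB_FC32 type and the
// GxB_IDENTITY_FC32 unary operator from the SuiteSparse complex extensions;
// error handling omitted):
//
//   GrB_Matrix A, C ;
//   GrB_Matrix_new (&A, GrB_BOOL, n, n) ;    // boolean input
//   GrB_Matrix_new (&C, GxB_FC32, n, n) ;    // single-complex output
//   // C = identity (A), casting each bool aij to GxB_CMPLXF ((float) aij, 0)
//   GrB_Matrix_apply (C, NULL, NULL, GxB_IDENTITY_FC32, A, NULL) ;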
gamma_index_ivfpq.h
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This faiss source code is licensed under the MIT license. * https://github.com/facebookresearch/faiss/blob/master/LICENSE * * * The works below are modified based on faiss: * 1. Replace the static batch indexing with real time indexing * 2. Add the fine-grained sort after PQ coarse sort * 3. Add the numeric field and bitmap filters in the process of searching * * Modified works copyright 2019 The Gamma Authors. * * The modified codes are licensed under the Apache License, Version 2.0 license * found in the LICENSE file in the root directory of this source tree. * */ #ifndef GAMMA_INDEX_IVFPQ_H_ #define GAMMA_INDEX_IVFPQ_H_ #include <unistd.h> #include <atomic> #include "faiss/IndexIVF.h" #include "faiss/IndexIVFPQ.h" #include "faiss/VectorTransform.h" #include "faiss/IndexHNSW.h" #include "faiss/invlists/DirectMap.h" #include "faiss/invlists/InvertedLists.h" #include "faiss/impl/FaissAssert.h" #include "faiss/impl/io.h" #include "faiss/index_io.h" #include "faiss/utils/Heap.h" #include "faiss/utils/distances.h" #include "faiss/utils/hamming.h" #include "faiss/utils/utils.h" #include "table/field_range_index.h" #include "common/gamma_common_data.h" #include "gamma_index_flat.h" #include "gamma_scanner.h" #include "util/log.h" #include "vector/memory_raw_vector.h" #include "vector/raw_vector.h" #include "realtime/realtime_invert_index.h" #include "index/retrieval_model.h" #include "util/utils.h" namespace tig_gamma { /// statistics are robust to internal threading, but not if /// IndexIVFPQ::search_preassigned is called by multiple threads struct IndexIVFPQStats { size_t nrefine; // nb of refines (IVFPQR) size_t n_hamming_pass; // nb of passed Hamming distance tests (for polysemous) // timings measured with the CPU RTC // on all threads size_t search_cycles; size_t refine_cycles; // only for IVFPQR IndexIVFPQStats() { reset(); } void reset(){}; }; // global var that collects them all extern IndexIVFPQStats indexIVFPQ_stats; // namespace { using idx_t = faiss::Index::idx_t; #define TIC t0 = faiss::get_cycles() #define TOC faiss::get_cycles() - t0 /** QueryTables manages the various ways of searching an * IndexIVFPQ. The code contains a lot of branches, depending on: * - metric_type: are we computing L2 or Inner product similarity? * - by_residual: do we encode raw vectors or residuals? * - use_precomputed_table: are x_R|x_C tables precomputed? * - polysemous_ht: are we filtering with polysemous codes? 
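 *
 * A sketch of the standard IVFPQ decomposition these tables exploit for
 * by_residual L2 search (x = query, y_C = coarse centroid, y_R = residual
 * reconstructed by the PQ):
 *
 *   || x - y_C - y_R ||^2 = || x - y_C ||^2        (coarse distance)
 *                         + || y_R ||^2 + 2 (y_C|y_R)
 *                                                  (query-independent,
 *                                                   precomputable per list)
 *                         - 2 (x|y_R)              (inner-product table of
 *                                                   the query)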
 */
struct QueryTables {
  /*****************************************************
   * General data from the IVFPQ
   *****************************************************/

  const faiss::IndexIVFPQ &ivfpq;
  const faiss::IVFSearchParameters *params;

  // copied from IndexIVFPQ for easier access
  int d;
  const faiss::ProductQuantizer &pq;
  faiss::MetricType metric_type;
  bool by_residual;
  int use_precomputed_table;
  int polysemous_ht;

  // pre-allocated data buffers
  float *sim_table, *sim_table_2;
  float *residual_vec, *decoded_vec;

  // single data buffer
  std::vector<float> mem;

  // for table pointers
  std::vector<const float *> sim_table_ptrs;

  explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
                       const faiss::IVFSearchParameters *params,
                       faiss::MetricType metric_type)
      : ivfpq(ivfpq),
        d(ivfpq.d),
        pq(ivfpq.pq),
        metric_type(metric_type),
        by_residual(ivfpq.by_residual),
        use_precomputed_table(ivfpq.use_precomputed_table) {
    mem.resize(pq.ksub * pq.M * 2 + d * 2);
    sim_table = mem.data();
    sim_table_2 = sim_table + pq.ksub * pq.M;
    residual_vec = sim_table_2 + pq.ksub * pq.M;
    decoded_vec = residual_vec + d;

    // for polysemous
    polysemous_ht = ivfpq.polysemous_ht;
    if (auto ivfpq_params =
            dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
      polysemous_ht = ivfpq_params->polysemous_ht;
    }
    if (polysemous_ht != 0) {
      q_code.resize(pq.code_size);
    }
    init_list_cycles = 0;
    sim_table_ptrs.resize(pq.M);
  }

  /*****************************************************
   * What we do when query is known
   *****************************************************/

  // field specific to query
  const float *qi;

  // query-specific initialization
  void init_query(const float *qi) {
    this->qi = qi;
    if (metric_type == faiss::METRIC_INNER_PRODUCT)
      init_query_IP();
    else
      init_query_L2();
    if (!by_residual && polysemous_ht != 0)
      pq.compute_code(qi, q_code.data());
  }

  void init_query_IP() {
    // precompute some tables specific to the query qi
    pq.compute_inner_prod_table(qi, sim_table);
  }

  void init_query_L2() {
    if (!by_residual) {
      pq.compute_distance_table(qi, sim_table);
    } else if (use_precomputed_table) {
      pq.compute_inner_prod_table(qi, sim_table_2);
    }
  }

  /*****************************************************
   * When inverted list is known: prepare computations
   *****************************************************/

  // fields specific to list
  long key;
  float coarse_dis;
  std::vector<uint8_t> q_code;

  uint64_t init_list_cycles;

  /// once we know the query and the centroid, we can prepare the
  /// sim_table that will be used for accumulation
  /// and dis0, the initial value
  float precompute_list_tables() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        dis0 = precompute_list_tables_IP();
      else
        dis0 = precompute_list_tables_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }

  float precompute_list_table_pointers() {
    float dis0 = 0;
    uint64_t t0;
    TIC;
    if (by_residual) {
      if (metric_type == faiss::METRIC_INNER_PRODUCT)
        FAISS_THROW_MSG("not implemented");
      else
        dis0 = precompute_list_table_pointers_L2();
    }
    init_list_cycles += TOC;
    return dis0;
  }

  /*****************************************************
   * compute tables for inner prod
   *****************************************************/

  float precompute_list_tables_IP() {
    // prepare the sim_table that will be used for accumulation
    // and dis0, the initial value
    ivfpq.quantizer->reconstruct(key, decoded_vec);
    // decoded_vec = centroid
    float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);

    if (polysemous_ht) {
      for (int i = 0; i < d; i++) {
        residual_vec[i] = qi[i] - decoded_vec[i];
      }
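      // Note: with by_residual, the stored PQ codes encode residuals, so the
      // query-side code used by the polysemous Hamming filter must be
      // computed from the same residual representation: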
      pq.compute_code(residual_vec, q_code.data());
    }
    return dis0;
  }

  /*****************************************************
   * compute tables for L2 distance
   *****************************************************/

  float precompute_list_tables_L2() {
    float dis0 = 0;

    if (use_precomputed_table == 0 || use_precomputed_table == -1) {
      ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      pq.compute_distance_table(residual_vec, sim_table);

      if (polysemous_ht != 0) {
        pq.compute_code(residual_vec, q_code.data());
      }
    } else if (use_precomputed_table == 1) {
      dis0 = coarse_dis;

      faiss::fvec_madd(pq.M * pq.ksub,
                       ivfpq.precomputed_table.data() + key * pq.ksub * pq.M,
                       -2.0, sim_table_2, sim_table);

      if (polysemous_ht != 0) {
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
        pq.compute_code(residual_vec, q_code.data());
      }
    } else if (use_precomputed_table == 2) {
      dis0 = coarse_dis;

      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;

      const float *qtab = sim_table_2;  // query-specific table
      float *ltab = sim_table;          // (output) list-specific table

      long k = key;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        // compute PQ index
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;

        // get corresponding table
        const float *pc =
            ivfpq.precomputed_table.data() + (ki * pq.M + cm * Mf) * pq.ksub;

        if (polysemous_ht == 0) {
          // sum up with query-specific table
          faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
          ltab += Mf * pq.ksub;
          qtab += Mf * pq.ksub;
        } else {
          for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
            q_code[m] =
                faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
            pc += pq.ksub;
            ltab += pq.ksub;
            qtab += pq.ksub;
          }
        }
      }
    }

    return dis0;
  }

  float precompute_list_table_pointers_L2() {
    float dis0 = 0;

    if (use_precomputed_table == 1) {
      dis0 = coarse_dis;

      const float *s =
          ivfpq.precomputed_table.data() + key * pq.ksub * pq.M;
      for (size_t m = 0; m < pq.M; m++) {
        sim_table_ptrs[m] = s;
        s += pq.ksub;
      }
    } else if (use_precomputed_table == 2) {
      dis0 = coarse_dis;

      const faiss::MultiIndexQuantizer *miq =
          dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
      FAISS_THROW_IF_NOT(miq);
      const faiss::ProductQuantizer &cpq = miq->pq;
      int Mf = pq.M / cpq.M;

      long k = key;
      int m0 = 0;
      for (size_t cm = 0; cm < cpq.M; cm++) {
        int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
        k >>= cpq.nbits;

        const float *pc =
            ivfpq.precomputed_table.data() + (ki * pq.M + cm * Mf) * pq.ksub;

        for (int m = m0; m < m0 + Mf; m++) {
          sim_table_ptrs[m] = pc;
          pc += pq.ksub;
        }
        m0 += Mf;
      }
    } else {
      FAISS_THROW_MSG("need precomputed tables");
    }

    if (polysemous_ht) {
      FAISS_THROW_MSG("not implemented");
      // Not clear that it makes sense to implement this,
      // because it costs M * ksub, which is what we wanted to
      // avoid with the table pointers.
    }

    return dis0;
  }
};

template <class C>
struct KnnSearchResults {
  idx_t key;
  const idx_t *ids;

  // heap params
  size_t k;
  float *heap_sim;
  idx_t *heap_ids;

  size_t nup;

  inline void add(idx_t j, float dis) {
    if (C::cmp(heap_sim[0], dis)) {
      idx_t id = ids ? ids[j] : faiss::lo_build(key, j);
      faiss::heap_replace_top<C>(k, heap_sim, heap_ids, dis, id);
      nup++;
    }
  }
};

/*****************************************************
 * Scanning the codes.
 * The scanning functions call their favorite precompute_*
 * function to precompute the tables they need.
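 * (In this code, precompute_mode 2 scans with fully materialized tables
 * (scan_list_with_table), mode 1 with per-subquantizer table pointers
 * (scan_list_with_pointer), and anything else falls back to decoding and
 * computing distances on the fly (scan_on_the_fly_dist).)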
*****************************************************/ template <typename IDType, faiss::MetricType METRIC_TYPE, class PQDecoder> struct IVFPQScannerT : QueryTables { const uint8_t *list_codes; const IDType *list_ids; size_t list_size; explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq, const faiss::IVFSearchParameters *params) : QueryTables(ivfpq, params, METRIC_TYPE) { } float dis0; void init_list(idx_t list_no, float coarse_dis, int mode) { this->key = list_no; this->coarse_dis = coarse_dis; if (mode == 2) { dis0 = precompute_list_tables(); } else if (mode == 1) { dis0 = precompute_list_table_pointers(); } } /// version of the scan where we use precomputed tables template <class SearchResultType> void scan_list_with_table(size_t ncode, const uint8_t* codes, SearchResultType& res) const { for (size_t j = 0; j < ncode; j++) { PQDecoder decoder(codes, pq.nbits); codes += pq.code_size; float dis = dis0; const float* tab = sim_table; for (size_t m = 0; m < pq.M; m++) { dis += tab[decoder.decode()]; tab += pq.ksub; } res.add(j, dis); } } /// tables are not precomputed, but pointers are provided to the /// relevant X_c|x_r tables template <class SearchResultType> void scan_list_with_pointer(size_t ncode, const uint8_t *codes, SearchResultType &res) const { for (size_t j = 0; j < ncode; j++) { PQDecoder decoder(codes, pq.nbits); codes += pq.code_size; float dis = dis0; const float *tab = sim_table_2; for (size_t m = 0; m < pq.M; m++) { int ci = decoder.decode(); dis += sim_table_ptrs[m][ci] - 2 * tab[ci]; tab += pq.ksub; } res.add(j, dis); } } /// nothing is precomputed: access residuals on-the-fly template <class SearchResultType> void scan_on_the_fly_dist(size_t ncode, const uint8_t *codes, SearchResultType &res) const { const float *dvec; float dis0 = 0; if (by_residual) { if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) { ivfpq.quantizer->reconstruct(key, residual_vec); dis0 = faiss::fvec_inner_product(residual_vec, qi, d); } else { ivfpq.quantizer->compute_residual(qi, residual_vec, key); } dvec = residual_vec; } else { dvec = qi; dis0 = 0; } for (size_t j = 0; j < ncode; j++) { pq.decode(codes, decoded_vec); codes += pq.code_size; float dis; if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) { dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d); } else { dis = faiss::fvec_L2sqr(decoded_vec, dvec, d); } res.add(j, dis); } } /***************************************************** * Scanning codes with polysemous filtering *****************************************************/ template <class HammingComputer, class SearchResultType> void scan_list_polysemous_hc(size_t ncode, const uint8_t *codes, SearchResultType &res) const { int ht = ivfpq.polysemous_ht; size_t n_hamming_pass = 0; int code_size = pq.code_size; HammingComputer hc(q_code.data(), code_size); for (size_t j = 0; j < ncode; j++) { const uint8_t *b_code = codes; int hd = hc.hamming(b_code); if (hd < ht) { n_hamming_pass++; PQDecoder decoder(codes, pq.nbits); float dis = dis0; const float *tab = sim_table; for (size_t m = 0; m < pq.M; m++) { dis += tab[decoder.decode()]; tab += pq.ksub; } res.add(j, dis); } codes += code_size; } #pragma omp critical { indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; } } template <class SearchResultType> void scan_list_polysemous(size_t ncode, const uint8_t *codes, SearchResultType &res) const { switch (pq.code_size) { #define HANDLE_CODE_SIZE(cs) \ case cs: \ scan_list_polysemous_hc<faiss::HammingComputer##cs, SearchResultType>( \ ncode, codes, res); \ break HANDLE_CODE_SIZE(4); 
HANDLE_CODE_SIZE(8); HANDLE_CODE_SIZE(16); HANDLE_CODE_SIZE(20); HANDLE_CODE_SIZE(32); HANDLE_CODE_SIZE(64); #undef HANDLE_CODE_SIZE default: if (pq.code_size % 8 == 0) scan_list_polysemous_hc<faiss::HammingComputerM8, SearchResultType>( ncode, codes, res); else scan_list_polysemous_hc<faiss::HammingComputerM4, SearchResultType>( ncode, codes, res); break; } } }; struct GammaIVFPQIndex; template <faiss::MetricType METRIC_TYPE, class C, class PQDecoder> struct GammaIVFPQScanner : IVFPQScannerT<idx_t, METRIC_TYPE, PQDecoder>, GammaInvertedListScanner { int precompute_mode; bool store_pairs; const GammaIVFPQIndex &gamma_ivfpq_; GammaIVFPQScanner(const GammaIVFPQIndex &gamma_ivfpq, bool store_pairs, int precompute_mode) : IVFPQScannerT<idx_t, METRIC_TYPE, PQDecoder>(gamma_ivfpq, nullptr), precompute_mode(precompute_mode), store_pairs(store_pairs), gamma_ivfpq_(gamma_ivfpq) { } inline void set_query(const float *query) override { this->init_query(query); } inline void set_list(idx_t list_no, float coarse_dis) override { this->init_list(list_no, coarse_dis, precompute_mode); } inline float distance_to_code(const uint8_t *code) const override { assert(precompute_mode == 2); float dis = this->dis0; const float *tab = this->sim_table; PQDecoder decoder(code, this->pq.nbits); for (size_t m = 0; m < this->pq.M; m++) { dis += tab[decoder.decode()]; tab += this->pq.ksub; } return dis; } /// version of the scan where we use precomputed tables template <class SearchResultType> void scan_list_with_table(size_t ncode, const uint8_t* codes, SearchResultType& res) const { for (size_t j = 0; j < ncode; j++) { if (res.ids[j] & realtime::kDelIdxMask) { codes += this->pq.code_size; continue; } if (!retrieval_context_->IsValid(res.ids[j] & realtime::kRecoverIdxMask)) { codes += this->pq.code_size; continue; } PQDecoder decoder(codes, this->pq.nbits); codes += this->pq.code_size; float dis = this->dis0; const float* tab = this->sim_table; for (size_t m = 0; m < this->pq.M; m++) { dis += tab[decoder.decode()]; tab += this->pq.ksub; } res.add(j, dis); } } inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids, float *heap_sim, idx_t *heap_ids, size_t k) const override { KnnSearchResults<C> res = {/* key */ this->key, /* ids */ this->store_pairs ? 
nullptr : ids, /* k */ k, /* heap_sim */ heap_sim, /* heap_ids */ heap_ids, /* nup */ 0}; if (this->polysemous_ht > 0) { assert(precompute_mode == 2); this->scan_list_polysemous(ncode, codes, res); } else if (precompute_mode == 2) { this->scan_list_with_table(ncode, codes, res); } else if (precompute_mode == 1) { this->scan_list_with_pointer(ncode, codes, res); } else if (precompute_mode == 0) { this->scan_on_the_fly_dist(ncode, codes, res); } else { FAISS_THROW_MSG("bad precomp mode"); } return res.nup; } }; class IVFPQRetrievalParameters : public RetrievalParameters { public: IVFPQRetrievalParameters() : RetrievalParameters() { parallel_on_queries_ = true; recall_num_ = 100; nprobe_ = -1; } IVFPQRetrievalParameters(bool parallel_on_queries, int recall_num, int nprobe, enum DistanceComputeType type) { parallel_on_queries_ = parallel_on_queries; recall_num_ = recall_num; nprobe_ = nprobe; distance_compute_type_ = type; } IVFPQRetrievalParameters(enum DistanceComputeType type) { parallel_on_queries_ = true; recall_num_ = 100; nprobe_ = -1; distance_compute_type_ = type; } virtual ~IVFPQRetrievalParameters() {} int RecallNum() { return recall_num_; } void SetRecallNum(int recall_num) { recall_num_ = recall_num; } int Nprobe() { return nprobe_; } void SetNprobe(int nprobe) { nprobe_ = nprobe; } bool ParallelOnQueries() { return parallel_on_queries_; } void SetParallelOnQueries(bool parallel_on_queries) { parallel_on_queries_ = parallel_on_queries; } protected: // parallelize over queries or ivf lists bool parallel_on_queries_; int recall_num_; int nprobe_; }; struct IVFPQModelParams { int ncentroids; // coarse cluster center number int nsubvector; // number of sub cluster center int nbits_per_idx; // bit number of sub cluster center int nprobe; // search how many bucket DistanceComputeType metric_type; bool has_hnsw; int nlinks; // link number for hnsw graph int efConstruction; // construction parameter for building hnsw graph int efSearch; // search parameter for search in hnsw graph bool has_opq; int opq_nsubvector; // number of sub cluster center of opq int bucket_init_size; // original size of RTInvertIndex bucket int bucket_max_size; // max size of RTInvertIndex bucket IVFPQModelParams() { ncentroids = 2048; nsubvector = 64; nbits_per_idx = 8; nprobe = 80; metric_type = DistanceComputeType::INNER_PRODUCT; has_hnsw = false; nlinks = 32; efConstruction = 200; efSearch = 64; has_opq = false; opq_nsubvector = 64; bucket_init_size = 1000; bucket_max_size = 1280000; } int Parse(const char *str) { utils::JsonParser jp; if (jp.Parse(str)) { LOG(ERROR) << "parse IVFPQ retrieval parameters error: " << str; return -1; } int ncentroids; int nsubvector; int nbits_per_idx; int nprobe; // -1 as default if (!jp.GetInt("ncentroids", ncentroids)) { if (ncentroids < -1) { LOG(ERROR) << "invalid ncentroids =" << ncentroids; return -1; } if (ncentroids > 0) this->ncentroids = ncentroids; } else { LOG(ERROR) << "cannot get ncentroids for ivfpq, set it when create space"; return -1; } if (!jp.GetInt("nsubvector", nsubvector)) { if (nsubvector < -1) { LOG(ERROR) << "invalid nsubvector =" << nsubvector; return -1; } if (nsubvector > 0) this->nsubvector = nsubvector; } else { LOG(ERROR) << "cannot get nsubvector for ivfpq, set it when create space"; return -1; } if (!jp.GetInt("nbits_per_idx", nbits_per_idx)) { if (nbits_per_idx < -1) { LOG(ERROR) << "invalid nbits_per_idx =" << nbits_per_idx; return -1; } if (nbits_per_idx > 0) this->nbits_per_idx = nbits_per_idx; } if (!jp.GetInt("nprobe", nprobe)) { if (nprobe 
< -1) {
        LOG(ERROR) << "invalid nprobe =" << nprobe;
        return -1;
      }
      if (nprobe > 0) this->nprobe = nprobe;
      if (this->nprobe > this->ncentroids) {
        LOG(ERROR) << "nprobe should be less than ncentroids";
        return -1;
      }
    }

    int bucket_init_size;
    int bucket_max_size;

    // -1 as default
    if (!jp.GetInt("bucket_init_size", bucket_init_size)) {
      if (bucket_init_size < -1) {
        LOG(ERROR) << "invalid bucket_init_size =" << bucket_init_size;
        return -1;
      }
      if (bucket_init_size > 0) this->bucket_init_size = bucket_init_size;
    }

    if (!jp.GetInt("bucket_max_size", bucket_max_size)) {
      if (bucket_max_size < -1) {
        LOG(ERROR) << "invalid bucket_max_size =" << bucket_max_size;
        return -1;
      }
      if (bucket_max_size > 0) this->bucket_max_size = bucket_max_size;
    }

    std::string metric_type;
    if (!jp.GetString("metric_type", metric_type)) {
      if (strcasecmp("L2", metric_type.c_str()) &&
          strcasecmp("InnerProduct", metric_type.c_str())) {
        LOG(ERROR) << "invalid metric_type = " << metric_type;
        return -1;
      }
      if (!strcasecmp("L2", metric_type.c_str()))
        this->metric_type = DistanceComputeType::L2;
      else
        this->metric_type = DistanceComputeType::INNER_PRODUCT;
    }

    utils::JsonParser jp_hnsw;
    if (!jp.GetObject("hnsw", jp_hnsw)) {
      has_hnsw = true;
      int nlinks;
      int efConstruction;
      int efSearch;

      // -1 as default
      if (!jp_hnsw.GetInt("nlinks", nlinks)) {
        if (nlinks < -1) {
          LOG(ERROR) << "invalid nlinks = " << nlinks;
          return -1;
        }
        if (nlinks > 0) this->nlinks = nlinks;
      }

      if (!jp_hnsw.GetInt("efConstruction", efConstruction)) {
        if (efConstruction < -1) {
          LOG(ERROR) << "invalid efConstruction = " << efConstruction;
          return -1;
        }
        if (efConstruction > 0) this->efConstruction = efConstruction;
      }

      if (!jp_hnsw.GetInt("efSearch", efSearch)) {
        if (efSearch < -1) {
          LOG(ERROR) << "invalid efSearch = " << efSearch;
          return -1;
        }
        if (efSearch > 0) this->efSearch = efSearch;
      }
    }

    utils::JsonParser jp_opq;
    if (!jp.GetObject("opq", jp_opq)) {
      has_opq = true;
      int opq_nsubvector;

      // -1 as default
      if (!jp_opq.GetInt("nsubvector", opq_nsubvector)) {
        if (opq_nsubvector < -1) {
          LOG(ERROR) << "invalid opq_nsubvector = " << opq_nsubvector;
          return -1;
        }
        if (opq_nsubvector > 0) this->opq_nsubvector = opq_nsubvector;
      }
    }

    if (!Validate()) return -1;
    return 0;
  }

  bool Validate() {
    if (ncentroids <= 0 || nsubvector <= 0 || nbits_per_idx <= 0) return false;
    // if (nbits_per_idx != 8) {
    //   LOG(ERROR) << "only support 8 now, nbits_per_idx=" << nbits_per_idx;
    //   return false;
    // }
    return true;
  }

  std::string ToString() {
    std::stringstream ss;
    ss << "ncentroids =" << ncentroids << ", ";
    ss << "nsubvector =" << nsubvector << ", ";
    ss << "nbits_per_idx =" << nbits_per_idx << ", ";
    ss << "nprobe =" << nprobe << ", ";
    ss << "metric_type =" << (int)metric_type << ", ";
    ss << "bucket_init_size =" << bucket_init_size << ", ";
    ss << "bucket_max_size =" << bucket_max_size;

    if (has_hnsw) {
      ss << ", hnsw: nlinks=" << nlinks << ", ";
      ss << "efConstruction=" << efConstruction << ", ";
      ss << "efSearch=" << efSearch;
    }
    if (has_opq) {
      ss << ", opq: nsubvector=" << opq_nsubvector;
    }

    return ss.str();
  }

  int ToJson(utils::JsonParser &jp) { return 0; }
};

struct GammaIVFPQIndex : GammaFLATIndex, faiss::IndexIVFPQ {
  GammaIVFPQIndex();

  virtual ~GammaIVFPQIndex();

  GammaInvertedListScanner *GetInvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  template <class PQDecoder>
  GammaInvertedListScanner *GetGammaInvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  int Init(const std::string &model_parameters, int indexing_size) override;

  RetrievalParameters *Parse(const std::string &parameters) override;
int Indexing() override; bool Add(int n, const uint8_t *vec); int Update(const std::vector<int64_t> &ids, const std::vector<const uint8_t *> &vecs); // assign the vectors, then call search_preassign int Search(RetrievalContext *retrieval_context, int n, const uint8_t *x, int k, float *distances, idx_t *labels); void search_preassigned(RetrievalContext *retrieval_context, int n, const float *x, const float *applied_x, int k, const idx_t *keys, const float *coarse_dis, float *distances, idx_t *labels, int nprobe, bool store_pairs, const faiss::IVFSearchParameters *params = nullptr); long GetTotalMemBytes() override { if (!rt_invert_index_ptr_) { return 0; } return rt_invert_index_ptr_->GetTotalMemBytes(); } int Dump(const std::string &dir) override; int Load(const std::string &index_dir) override; virtual void copy_subset_to(faiss::IndexIVF &other, int subset_type, idx_t a1, idx_t a2) const; int Delete(const std::vector<int64_t> &ids); void train(int64_t n, const float *x) { faiss::IndexIVFPQ::train(n, x); } int indexed_vec_count_; realtime::RTInvertIndex *rt_invert_index_ptr_; bool compaction_; size_t compact_bucket_no_; uint64_t compacted_num_; uint64_t updated_num_; int d_; DistanceComputeType metric_type_; faiss::VectorTransform *opq_; // 0 is FlatL2, 1 is HNSWFlat int quantizer_type_; #ifdef PERFORMANCE_TESTING std::atomic<uint64_t> search_count_; int add_count_; #endif IVFPQModelParams *model_param_; }; } // namespace tig_gamma #endif
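Aside: the inner loop of scan_list_with_table() above is the classic product-quantizer asymmetric distance computation (ADC). Each code byte selects one of ksub precomputed sub-distances per sub-quantizer, so a full distance costs M table lookups plus the list-level offset dis0. A minimal standalone C sketch of that accumulation (the sizes M and KSUB here are toy values for illustration, not index defaults):

#include <stdio.h>

#define M    4     /* number of sub-quantizers */
#define KSUB 256   /* centroids per sub-quantizer (8-bit codes) */

/* dis0 is the per-list offset; tab is the M x KSUB lookup table built
   once per (query, inverted list) pair, as in precompute_list_tables(). */
static float adc_distance(float dis0, const float tab[M][KSUB],
                          const unsigned char code[M]) {
    float dis = dis0;
    for (int m = 0; m < M; m++)
        dis += tab[m][code[m]];   /* one table lookup per sub-quantizer */
    return dis;
}

int main(void) {
    static float tab[M][KSUB];            /* static => zero-initialized */
    unsigned char code[M] = {1, 2, 3, 4}; /* a 4-byte PQ code */
    for (int m = 0; m < M; m++)
        tab[m][code[m]] = 0.25f;          /* pretend sub-distances */
    printf("dis = %f\n", adc_distance(0.0f, tab, code)); /* prints 1.0 */
    return 0;
}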
gcolor.c
#include "gcolor.h" int gcolor(int* adj, int n) { int size = pow(2, n); int table[size]; compute_table_omp(adj, n, table, size); // binary seach k int k = binary_search(adj, n, table, size, 0, n); return k; } long long gcolor_2(int* adj, int n, int* table, int k, int size) { long long sum = 0; for(int i = 0; i < size; i++) { int set[n]; int num = int_to_set(set, i, n); int m = table[i]; long long sxk = pow(m, k); sum = sum + pow(-1, n-num)*sxk; } return sum; } long long gcolor_2_omp(int* adj, int n, int* table, int k, int size) { long long sum = 0; #pragma omp parallel for reduction(+: sum) for(int i = 0; i < size; i++) { int set[n]; int num = int_to_set(set, i, n); int m = table[i]; long long sxk = pow(m, k); sum = sum + pow(-1, n-num)*sxk; } return sum; } int binary_search(int* adj, int n, int* table, int size, int left, int right) { int flags[n+1]; // 0 = undefined, 1 = valid colouring, -1 = invalid colouring while(left <= right) { int middle = (left + right) / 2; long long value = gcolor_2(adj, n, table, middle, size); if(value > 0 && middle > 0 && flags[middle - 1] == -1) { return middle; } else if(value <= 0 && middle < (n-1) && flags[middle + 1] == 1) { return middle + 1; } else if(value > 0) { flags[middle] = 1; right = middle - 1; } else { flags[middle] = -1; left = middle + 1; } } return -1; } int nCk_withreplace(int n, int k) { if(n < k) { return 0; } int top = factorial(n + k - 1); int down = factorial(k) * factorial(n - 1); return top/down; } int factorial(int x) { return (x == 1 ? x : x * factorial(x - 1)); } int remove_vertex(int* adj, int n, int* set) { for(int i = 0; i < n; i++) { if(set[i] == 1) { set[i] = 0; return i; } } return -1; } int remove_vertex_2(int* adj, int n, int* set, int vid) { for(int i = 0; i < n; i++) { if(adj[i * n + vid] == 1 && set[i] == 1) // if there is i to v, and i is in the set { set[i] = 0; // remove i for the set } } return -1; } int compute_table(int* adj, int n, int* table, int size) { // sets are V subsets for(int set = 0; set < size; set++) { table[set] = -1; // -1 = undefined } table[0] = 0; // s[V] = 0, no sets for(int set = size-1; set >= 0; set--) { compute_table_helper(adj, n, table, set); } return 1; } int compute_table_helper(int* adj, int n, int* table, int index) { if(table[index] == -1) // if it is not defined { int set[n]; int_to_set(set, index, n); int vid = remove_vertex(adj, n, set); int vindex = set_to_int(set, n); compute_table_helper(adj, n, table, vindex); // s(X U v) remove_vertex_2(adj, n, set, vid); int vadjindex = set_to_int(set, n); compute_table_helper(adj, n, table, vadjindex); // s(X U v U N(v)} table[index] = table[vindex] + table[vadjindex] + 1; } return 1; } int compute_table_omp(int* adj, int n, int* table, int size) { // sets are V subsets for(int set = 0; set < size; set++) { table[set] = -1; // -1 = undefined } table[0] = 0; // s[V] = 0, no sets // #pragma omp parallel #pragma omp single nowait for(int set = size-1; set >= 0; set--) { compute_table_helper_omp(adj, n, table, set); } return 1; } int compute_table_helper_omp(int* adj, int n, int* table, int index) { if(table[index] == -1) // if it is not defined { int set[n]; int_to_set(set, index, n); int vid = remove_vertex(adj, n, set); int vindex = set_to_int(set, n); #pragma omp task compute_table_helper(adj, n, table, vindex); // s(X U v) remove_vertex_2(adj, n, set, vid); int vadjindex = set_to_int(set, n); #pragma omp task compute_table_helper(adj, n, table, vadjindex); // s(X U v U N(v)} #pragma omp taskwait table[index] = table[vindex] + 
table[vadjindex] + 1; } return 1; } int num_vertex(int* set, int length) { int count = 0; for(int i = 0; i < length; i++) { if(set[i] == 0) { count++; } } return count; } int int_to_set(int* set, int val, int n) { unsigned int mask = 1 << (n - 1); int count = 0; for(int i = 0; i < n; i++) { if((val & mask) == 0) { set[i] = 0; } else { set[i] = 1; count++; } mask >>= 1; } return count; // return number of 1s } int set_to_int(int* set, int n) { int val = 0; for(int i = 0; i < n; i++) { if(set[i] == 1) { val += pow(2, n-i-1); } } return val; } int is_subset(int* S, int* W, int length) { for(int i = 0; i < length; i++) { if((S[i] == 0) && (W[i] == 1)) { return 0; } } return 1; }
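A minimal usage sketch for the routines above (a hypothetical demo program, not part of gcolor.c; it assumes gcolor.h declares gcolor() and that adj is a row-major n*n 0/1 adjacency matrix, as the indexing adj[i * n + vid] suggests). A triangle is a complete graph on three vertices, so its chromatic number is 3:

/* compile along the lines of: gcc -fopenmp demo.c gcolor.c -lm */
#include <stdio.h>
#include "gcolor.h"

int main(void)
{
    int adj[9] = {
        0, 1, 1,   /* vertex 0 is adjacent to 1 and 2 */
        1, 0, 1,   /* vertex 1 is adjacent to 0 and 2 */
        1, 1, 0    /* vertex 2 is adjacent to 0 and 1 */
    };
    printf("chromatic number = %d\n", gcolor(adj, 3)); /* expect 3 */
    return 0;
}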
GB_assign_zombie4.c
//------------------------------------------------------------------------------
// GB_assign_zombie4: delete entries in C(i,:) for C_replace_phase
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// For GrB_Row_assign or GrB_Col_assign, C(i,J)<M,repl>=..., if C_replace is
// true, and mask M is present, then any entry C(i,j) outside the list J must
// be deleted, if M(0,j)=0.

// GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other.

// C must be sparse or hypersparse.
// M can have any sparsity structure: hypersparse, sparse, bitmap, or full.

// C->iso is not affected.

#include "GB_assign.h"
#include "GB_assign_zombie.h"

void GB_assign_zombie4
(
    GrB_Matrix C,                   // the matrix C, or a copy
    const GrB_Matrix M,
    const bool Mask_comp,
    const bool Mask_struct,
    const int64_t i,                // index of entries to delete
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (GB_ZOMBIES_OK (C)) ;
    ASSERT (!GB_JUMBLED (C)) ;      // binary search on C
    ASSERT (!GB_PENDING (C)) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_JUMBLED (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // get C
    //--------------------------------------------------------------------------

    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const int64_t Cnvec = C->nvec ;
    int64_t *restrict Ci = C->i ;
    int64_t nzombies = C->nzombies ;
    const int64_t zorig = nzombies ;

    //--------------------------------------------------------------------------
    // get M
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int8_t *restrict Mb = M->b ;
    const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const int64_t Mnvec = M->nvec ;
    const int64_t Mvlen = M->vlen ;
    ASSERT (Mvlen == 1) ;
    const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
    const bool M_is_bitmap = GB_IS_BITMAP (M) ;
    const bool M_is_full = GB_IS_FULL (M) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;

    //--------------------------------------------------------------------------
    // delete entries in C(i,:)
    //--------------------------------------------------------------------------

    // The entry C(i,j) is deleted if j is not in the list J, and if M(0,j)=0
    // (if the mask is not complemented) or M(0,j)=1 (if the mask is
    // complemented).
int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { int64_t kfirst, klast ; GB_PARTITION (kfirst, klast, Cnvec, taskid, ntasks) ; for (int64_t k = kfirst ; k < klast ; k++) { //------------------------------------------------------------------ // get C(:,j) and determine if j is outside the list J //------------------------------------------------------------------ int64_t j = GBH (Ch, k) ; bool j_outside = !GB_ij_is_in_list (J, nJ, j, Jkind, Jcolon) ; if (j_outside) { //-------------------------------------------------------------- // j is not in J; find C(i,j) //-------------------------------------------------------------- int64_t pC = Cp [k] ; int64_t pC_end = Cp [k+1] ; int64_t pright = pC_end - 1 ; bool found, is_zombie ; GB_BINARY_SEARCH_ZOMBIE (i, Ci, pC, pright, found, zorig, is_zombie) ; //-------------------------------------------------------------- // delete C(i,j) if found, not a zombie, and M(0,j) allows it //-------------------------------------------------------------- if (found && !is_zombie) { //---------------------------------------------------------- // C(i,j) is a live entry not in the C(I,J) submatrix //---------------------------------------------------------- // Check the mask M to see if it should be deleted. bool mij = false ; if (M_is_bitmap || M_is_full) { // M is bitmap/full, no need for GB_lookup int64_t pM = j ; mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; } else { // M is sparse or hypersparse int64_t pM, pM_end ; int64_t pleft = 0 ; int64_t pright = Mnvec - 1 ; GB_lookup (M_is_hyper, Mh, Mp, Mvlen, &pleft, pright, j, &pM, &pM_end) ; if (pM < pM_end) { // found it mij = GB_mcast (Mx, pM, msize) ; } } if (Mask_comp) { // negate the mask if Mask_comp is true mij = !mij ; } if (!mij) { // delete C(i,j) by marking it as a zombie nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } } } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; }
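Aside on the zombie convention used above: a zombie is an entry that still occupies its slot in Ci but is marked dead by storing a flipped (negative) row index, so a single deletion is O(1) and the physical removals are batched later. A minimal model of that encoding (illustrative macros only; the real GB_FLIP and friends live in the GraphBLAS internals):

#include <assert.h>
#include <stdint.h>

#define FLIP(i)      (-(i) - 2)         /* maps 0,1,2,... to -2,-3,-4,... */
#define IS_ZOMBIE(i) ((i) < 0)
#define UNFLIP(i)    (IS_ZOMBIE (i) ? FLIP (i) : (i))

int main (void)
{
    int64_t i = 42 ;
    int64_t z = FLIP (i) ;              /* mark index 42 as a zombie */
    assert (IS_ZOMBIE (z)) ;
    assert (UNFLIP (z) == 42) ;         /* the original index is recoverable */
    assert (FLIP (FLIP (i)) == i) ;     /* flipping is its own inverse */
    return 0 ;
}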
nested_serial.c
// RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true %libomp-run | %python %S/check.py -c 'CHECK' %s #include <stdio.h> #include <stdlib.h> #include <omp.h> #include "omp_testsuite.h" int main(int argc, char** argv) { omp_set_affinity_format("TESTER: tl:%L at:%a tn:%n nt:%N"); omp_set_nested(1); #pragma omp parallel num_threads(1) { go_parallel_nthreads(1); go_parallel_nthreads(1); #pragma omp parallel num_threads(1) { go_parallel_nthreads(1); } go_parallel_nthreads(1); } go_parallel_nthreads(1); go_parallel_nthreads(1); return get_exit_value(); } // CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1 // CHECK: num_threads=1 TESTER: tl:2 at:0 tn:0 nt:1 // CHECK: num_threads=1 TESTER: tl:3 at:0 tn:0 nt:1 // CHECK: num_threads=1 TESTER: tl:2 at:0 tn:0 nt:1 // CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1
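Aside (a separate toy program, not part of the test above): the fields in the format string are standard OpenMP 5.0 affinity-format specifiers: %L is the nesting level, %a the ancestor thread number, %n the thread number within the team, and %N the team size. A minimal standalone use, assuming an OpenMP 5.0 runtime:

#include <omp.h>

int main(void)
{
  omp_set_affinity_format("level=%L thread=%n of %N ancestor=%a");
  #pragma omp parallel num_threads(2)
  {
    omp_display_affinity(NULL); // NULL: use the format set above
  }
  return 0;
}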
x_solve.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB BT code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"
#include "work_lhs.h"
#include "timers.h"

//---------------------------------------------------------------------
// 
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix, 
// and then performing back substitution to solve for the unknown
// vectors of each line.
// 
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
// 
//---------------------------------------------------------------------

void x_solve()
{
  // printf("xxxxxx\n");
  int i, j, k, m, n, isize;
  //kai
  int k12 = 0;   // starting offset for the k sweep (the first plane is k12+1)
  // consistent_data(&k12, "int", 1);

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------

  if (timeron) timer_start(t_xsolve);

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // This function computes the left hand side in the xi-direction
  //---------------------------------------------------------------------
  isize = grid_points[0]-1;

  //---------------------------------------------------------------------
  // determine a (labeled f) and n jacobians
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) shared(isize) private(i,j,k,m,n)
  for (k = k12+1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 0; i <= isize; i++) {
        tmp1 = rho_i[k][j][i];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;
        //-------------------------------------------------------------------
        // 
        //-------------------------------------------------------------------
        fjac[i][0][0] = 0.0;
        fjac[i][1][0] = 1.0;
        fjac[i][2][0] = 0.0;
        fjac[i][3][0] = 0.0;
        fjac[i][4][0] = 0.0;

        fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1])
          + c2 * qs[k][j][i];
        fjac[i][1][1] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );
        fjac[i][2][1] = - c2 * ( u[k][j][i][2] * tmp1 );
        fjac[i][3][1] = - c2 * ( u[k][j][i][3] * tmp1 );
        fjac[i][4][1] = c2;

        fjac[i][0][2] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2;
        fjac[i][1][2] = u[k][j][i][2] * tmp1;
        fjac[i][2][2] = u[k][j][i][1] * tmp1;
        fjac[i][3][2] = 0.0;
        fjac[i][4][2] = 0.0;

        fjac[i][0][3] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;
        fjac[i][1][3] = u[k][j][i][3] * tmp1;
        fjac[i][2][3] = 0.0;
        fjac[i][3][3] = u[k][j][i][1] * tmp1;
        fjac[i][4][3] = 0.0;

        fjac[i][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
          * ( u[k][j][i][1] * tmp2 );
        fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1
          - c2 * ( u[k][j][i][1]*u[k][j][i][1] * tmp2 + qs[k][j][i] );
        fjac[i][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * tmp2;
        fjac[i][3][4] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * tmp2;
        fjac[i][4][4] = c1 * ( u[k][j][i][1] * tmp1 );

        njac[i][0][0] = 0.0;
        njac[i][1][0] = 0.0;
        njac[i][2][0] = 0.0;
        njac[i][3][0] = 0.0;
        njac[i][4][0] = 0.0;

        njac[i][0][1] = - con43 * c3c4 * tmp2 * u[k][j][i][1];
        njac[i][1][1] =   con43 * c3c4 * tmp1;
        njac[i][2][1] = 0.0;
        njac[i][3][1] = 0.0;
        njac[i][4][1] = 0.0;

        njac[i][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
        njac[i][1][2] = 0.0;
        njac[i][2][2] = c3c4 * tmp1;
        njac[i][3][2] = 0.0;
        njac[i][4][2] = 0.0;

        njac[i][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
        njac[i][1][3] = 0.0;
        njac[i][2][3] = 0.0;
        njac[i][3][3] = c3c4 * tmp1;
        njac[i][4][3] = 0.0;

        njac[i][0][4] = - ( con43 * c3c4 - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])
          - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])
          - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])
          - c1345 * tmp2 * u[k][j][i][4];

        njac[i][1][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
        njac[i][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
        njac[i][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
        njac[i][4][4] = ( c1345 ) * tmp1;
      }
      //---------------------------------------------------------------------
      // now jacobians set, so form left hand side in x direction
//--------------------------------------------------------------------- lhsinit(lhs, isize); for (i = 1; i <= isize-1; i++) { tmp1 = dt * tx1; tmp2 = dt * tx2; lhs[i][AA][0][0] = - tmp2 * fjac[i-1][0][0] - tmp1 * njac[i-1][0][0] - tmp1 * dx1; lhs[i][AA][1][0] = - tmp2 * fjac[i-1][1][0] - tmp1 * njac[i-1][1][0]; lhs[i][AA][2][0] = - tmp2 * fjac[i-1][2][0] - tmp1 * njac[i-1][2][0]; lhs[i][AA][3][0] = - tmp2 * fjac[i-1][3][0] - tmp1 * njac[i-1][3][0]; lhs[i][AA][4][0] = - tmp2 * fjac[i-1][4][0] - tmp1 * njac[i-1][4][0]; lhs[i][AA][0][1] = - tmp2 * fjac[i-1][0][1] - tmp1 * njac[i-1][0][1]; lhs[i][AA][1][1] = - tmp2 * fjac[i-1][1][1] - tmp1 * njac[i-1][1][1] - tmp1 * dx2; lhs[i][AA][2][1] = - tmp2 * fjac[i-1][2][1] - tmp1 * njac[i-1][2][1]; lhs[i][AA][3][1] = - tmp2 * fjac[i-1][3][1] - tmp1 * njac[i-1][3][1]; lhs[i][AA][4][1] = - tmp2 * fjac[i-1][4][1] - tmp1 * njac[i-1][4][1]; lhs[i][AA][0][2] = - tmp2 * fjac[i-1][0][2] - tmp1 * njac[i-1][0][2]; lhs[i][AA][1][2] = - tmp2 * fjac[i-1][1][2] - tmp1 * njac[i-1][1][2]; lhs[i][AA][2][2] = - tmp2 * fjac[i-1][2][2] - tmp1 * njac[i-1][2][2] - tmp1 * dx3; lhs[i][AA][3][2] = - tmp2 * fjac[i-1][3][2] - tmp1 * njac[i-1][3][2]; lhs[i][AA][4][2] = - tmp2 * fjac[i-1][4][2] - tmp1 * njac[i-1][4][2]; lhs[i][AA][0][3] = - tmp2 * fjac[i-1][0][3] - tmp1 * njac[i-1][0][3]; lhs[i][AA][1][3] = - tmp2 * fjac[i-1][1][3] - tmp1 * njac[i-1][1][3]; lhs[i][AA][2][3] = - tmp2 * fjac[i-1][2][3] - tmp1 * njac[i-1][2][3]; lhs[i][AA][3][3] = - tmp2 * fjac[i-1][3][3] - tmp1 * njac[i-1][3][3] - tmp1 * dx4; lhs[i][AA][4][3] = - tmp2 * fjac[i-1][4][3] - tmp1 * njac[i-1][4][3]; lhs[i][AA][0][4] = - tmp2 * fjac[i-1][0][4] - tmp1 * njac[i-1][0][4]; lhs[i][AA][1][4] = - tmp2 * fjac[i-1][1][4] - tmp1 * njac[i-1][1][4]; lhs[i][AA][2][4] = - tmp2 * fjac[i-1][2][4] - tmp1 * njac[i-1][2][4]; lhs[i][AA][3][4] = - tmp2 * fjac[i-1][3][4] - tmp1 * njac[i-1][3][4]; lhs[i][AA][4][4] = - tmp2 * fjac[i-1][4][4] - tmp1 * njac[i-1][4][4] - tmp1 * dx5; lhs[i][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][0][0] + tmp1 * 2.0 * dx1; lhs[i][BB][1][0] = tmp1 * 2.0 * njac[i][1][0]; lhs[i][BB][2][0] = tmp1 * 2.0 * njac[i][2][0]; lhs[i][BB][3][0] = tmp1 * 2.0 * njac[i][3][0]; lhs[i][BB][4][0] = tmp1 * 2.0 * njac[i][4][0]; lhs[i][BB][0][1] = tmp1 * 2.0 * njac[i][0][1]; lhs[i][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][1][1] + tmp1 * 2.0 * dx2; lhs[i][BB][2][1] = tmp1 * 2.0 * njac[i][2][1]; lhs[i][BB][3][1] = tmp1 * 2.0 * njac[i][3][1]; lhs[i][BB][4][1] = tmp1 * 2.0 * njac[i][4][1]; lhs[i][BB][0][2] = tmp1 * 2.0 * njac[i][0][2]; lhs[i][BB][1][2] = tmp1 * 2.0 * njac[i][1][2]; lhs[i][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][2][2] + tmp1 * 2.0 * dx3; lhs[i][BB][3][2] = tmp1 * 2.0 * njac[i][3][2]; lhs[i][BB][4][2] = tmp1 * 2.0 * njac[i][4][2]; lhs[i][BB][0][3] = tmp1 * 2.0 * njac[i][0][3]; lhs[i][BB][1][3] = tmp1 * 2.0 * njac[i][1][3]; lhs[i][BB][2][3] = tmp1 * 2.0 * njac[i][2][3]; lhs[i][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][3][3] + tmp1 * 2.0 * dx4; lhs[i][BB][4][3] = tmp1 * 2.0 * njac[i][4][3]; lhs[i][BB][0][4] = tmp1 * 2.0 * njac[i][0][4]; lhs[i][BB][1][4] = tmp1 * 2.0 * njac[i][1][4]; lhs[i][BB][2][4] = tmp1 * 2.0 * njac[i][2][4]; lhs[i][BB][3][4] = tmp1 * 2.0 * njac[i][3][4]; lhs[i][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][4][4] + tmp1 * 2.0 * dx5; lhs[i][CC][0][0] = tmp2 * fjac[i+1][0][0] - tmp1 * njac[i+1][0][0] - tmp1 * dx1; lhs[i][CC][1][0] = tmp2 * fjac[i+1][1][0] - tmp1 * njac[i+1][1][0]; lhs[i][CC][2][0] = tmp2 * fjac[i+1][2][0] - tmp1 * njac[i+1][2][0]; lhs[i][CC][3][0] = tmp2 * fjac[i+1][3][0] - tmp1 * 
njac[i+1][3][0];
        lhs[i][CC][4][0] =  tmp2 * fjac[i+1][4][0]
          - tmp1 * njac[i+1][4][0];

        lhs[i][CC][0][1] =  tmp2 * fjac[i+1][0][1]
          - tmp1 * njac[i+1][0][1];
        lhs[i][CC][1][1] =  tmp2 * fjac[i+1][1][1]
          - tmp1 * njac[i+1][1][1]
          - tmp1 * dx2;
        lhs[i][CC][2][1] =  tmp2 * fjac[i+1][2][1]
          - tmp1 * njac[i+1][2][1];
        lhs[i][CC][3][1] =  tmp2 * fjac[i+1][3][1]
          - tmp1 * njac[i+1][3][1];
        lhs[i][CC][4][1] =  tmp2 * fjac[i+1][4][1]
          - tmp1 * njac[i+1][4][1];

        lhs[i][CC][0][2] =  tmp2 * fjac[i+1][0][2]
          - tmp1 * njac[i+1][0][2];
        lhs[i][CC][1][2] =  tmp2 * fjac[i+1][1][2]
          - tmp1 * njac[i+1][1][2];
        lhs[i][CC][2][2] =  tmp2 * fjac[i+1][2][2]
          - tmp1 * njac[i+1][2][2]
          - tmp1 * dx3;
        lhs[i][CC][3][2] =  tmp2 * fjac[i+1][3][2]
          - tmp1 * njac[i+1][3][2];
        lhs[i][CC][4][2] =  tmp2 * fjac[i+1][4][2]
          - tmp1 * njac[i+1][4][2];

        lhs[i][CC][0][3] =  tmp2 * fjac[i+1][0][3]
          - tmp1 * njac[i+1][0][3];
        lhs[i][CC][1][3] =  tmp2 * fjac[i+1][1][3]
          - tmp1 * njac[i+1][1][3];
        lhs[i][CC][2][3] =  tmp2 * fjac[i+1][2][3]
          - tmp1 * njac[i+1][2][3];
        lhs[i][CC][3][3] =  tmp2 * fjac[i+1][3][3]
          - tmp1 * njac[i+1][3][3]
          - tmp1 * dx4;
        lhs[i][CC][4][3] =  tmp2 * fjac[i+1][4][3]
          - tmp1 * njac[i+1][4][3];

        lhs[i][CC][0][4] =  tmp2 * fjac[i+1][0][4]
          - tmp1 * njac[i+1][0][4];
        lhs[i][CC][1][4] =  tmp2 * fjac[i+1][1][4]
          - tmp1 * njac[i+1][1][4];
        lhs[i][CC][2][4] =  tmp2 * fjac[i+1][2][4]
          - tmp1 * njac[i+1][2][4];
        lhs[i][CC][3][4] =  tmp2 * fjac[i+1][3][4]
          - tmp1 * njac[i+1][3][4];
        lhs[i][CC][4][4] =  tmp2 * fjac[i+1][4][4]
          - tmp1 * njac[i+1][4][4]
          - tmp1 * dx5;
      }

      //---------------------------------------------------------------------
      //---------------------------------------------------------------------

      //---------------------------------------------------------------------
      // performs Gaussian elimination on this cell.
      // 
      // assumes that unpacking routines for non-first cells 
      // preload C' and rhs' from previous cell.
// // assumed send happens outside this routine, but that // c'(IMAX) and rhs'(IMAX) will be sent to next cell //--------------------------------------------------------------------- //--------------------------------------------------------------------- // outer most do loops - sweeping in i direction //--------------------------------------------------------------------- //--------------------------------------------------------------------- // multiply c[k][j][0] by b_inverse and copy back to c // multiply rhs(0) by b_inverse(0) and copy to rhs //--------------------------------------------------------------------- binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] ); //--------------------------------------------------------------------- // begin inner most do loop // do all the elements of the cell unless last //--------------------------------------------------------------------- for (i = 1; i <= isize-1; i++) { //------------------------------------------------------------------- // rhs(i) = rhs(i) - A*rhs(i-1) //------------------------------------------------------------------- matvec_sub(lhs[i][AA], rhs[k][j][i-1], rhs[k][j][i]); //------------------------------------------------------------------- // B(i) = B(i) - C(i-1)*A(i) //------------------------------------------------------------------- matmul_sub(lhs[i][AA], lhs[i-1][CC], lhs[i][BB]); //------------------------------------------------------------------- // multiply c[k][j][i] by b_inverse and copy back to c // multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs //------------------------------------------------------------------- binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] ); } //--------------------------------------------------------------------- // rhs(isize) = rhs(isize) - A*rhs(isize-1) //--------------------------------------------------------------------- matvec_sub(lhs[isize][AA], rhs[k][j][isize-1], rhs[k][j][isize]); //--------------------------------------------------------------------- // B(isize) = B(isize) - C(isize-1)*A(isize) //--------------------------------------------------------------------- matmul_sub(lhs[isize][AA], lhs[isize-1][CC], lhs[isize][BB]); //--------------------------------------------------------------------- // multiply rhs() by b_inverse() and copy to rhs //--------------------------------------------------------------------- binvrhs( lhs[isize][BB], rhs[k][j][isize] ); //--------------------------------------------------------------------- // back solve: if last cell, then generate U(isize)=rhs(isize) // else assume U(isize) is loaded in un pack backsub_info // so just use it // after u(istart) will be sent to next cell //--------------------------------------------------------------------- for (i = isize-1; i >=0; i--) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[i][CC][n][m]*rhs[k][j][i+1][n]; } } } } //kai k12 = 0; // printf("k12=%p\n", &k12); } if (timeron) timer_stop(t_xsolve); }
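Aside: the sweep above is the block-tridiagonal Thomas algorithm, a forward pass that eliminates the sub-diagonal A blocks (matvec_sub, matmul_sub, binvcrhs) followed by back substitution through the C blocks. The same two sweeps in scalar form, for a toy tridiagonal system a[i]*x[i-1] + b[i]*x[i] + c[i]*x[i+1] = r[i] (a minimal sketch, not NPB code):

#include <stdio.h>

#define N 4

int main(void)
{
  double a[N] = {0, 1, 1, 1};  /* sub-diagonal   (a[0] unused)   */
  double b[N] = {2, 2, 2, 2};  /* main diagonal                  */
  double c[N] = {1, 1, 1, 0};  /* super-diagonal (c[N-1] unused) */
  double r[N] = {3, 4, 4, 3};  /* right-hand side; exact solution is all 1s */
  int i;

  /* forward elimination: the scalar analogue of matvec_sub/matmul_sub */
  for (i = 1; i < N; i++) {
    double m = a[i] / b[i-1];
    b[i] -= m * c[i-1];
    r[i] -= m * r[i-1];
  }

  /* back substitution: the scalar analogue of the final i-loop above */
  r[N-1] /= b[N-1];
  for (i = N-2; i >= 0; i--)
    r[i] = (r[i] - c[i] * r[i+1]) / b[i];

  for (i = 0; i < N; i++) printf("x[%d] = %g\n", i, r[i]);
  return 0;
}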
RPN.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <ctype.h> #include "grb2.h" #include "wgrib2.h" #include "fnlist.h" /* * RPN reversed polish notation * * 4/2009 Public Domain by Wesley Ebisuzaki * * operations: * * + - * / * == != < <= > >= * * sqrt, sq, abs, 1/x, floor, ceil, pow (x^y), exp, ln * min, max, merge, mask * sin, cos, tan, asin, acos, atan, atan2 * * pi = 3.14159... * days_in_ref_month = number of days in the reference month * days_in_verf_month = number of days in the verification month * * registers: sto_N, rcl_N, clr_N * rcl_lat, rcl_lon * rcl (data) * stack: exc (swap), pop, dup, clr * * yrev - swap grids, north <-> south * alt_x_scan - for Glahn packing * xave, xdev * * print_(X): X=max, min, rms, corr, ave, diff * * at the end of rpn, the top of the stack is saved to data unless clr done first */ // #define N_RPN_REGS 10 moved to wgrib2.h #define STACK_SIZE 10 extern int decode, latlon; extern double *lat, *lon; extern int match_flag; extern const char *item_deliminator; extern int use_scale; /* note: rpn_n[N_RPN_REGS], and rpn_data[N_RPN_REG] */ size_t rpn_n[N_RPN_REGS] = { 0 }; float *rpn_data[N_RPN_REGS] = { NULL }; static float *stack[STACK_SIZE]; #define SCALAR 0 #define VECTOR 1 #define DBL_VEC 2 #ifndef M_PI #define M_PI 3.14159265358979323846 /* pi */ #endif int push(int top, unsigned int ndata, int type, float f, float *ff, double *d); static void gbl_wt(double *val, double *wt, float *data, int i, int j, int nx, int ny, double wt0); static void reg_wt(double *val, double *wt, float *data, int i, int j, int nx, int ny, double wt0); /* * HEADER:100:rpn:misc:1:reverse polish notation calculator */ int f_rpn(ARG1) { char string[100]; const char *p; int j, n; unsigned int i, k, m; float f; float tmp; int top, flag; double cos_lat, last_lat; double sum1, sum2, wt, sq1, sq2, sq12; int nx, ny, res, scan; unsigned int npnts; float *p1, *p2; int year, month, day, hour, minute, second; static int state=0; if (mode == -1) { decode = latlon = 1; if (state == 0) { /* check compile-time configuration */ if (sizeof(rpn_n)/sizeof(size_t) != N_RPN_REGS) fatal_error("RPN: configure N_RPN_REGS and rpn_n[]",""); if (sizeof(rpn_data)/sizeof(float *) != N_RPN_REGS) fatal_error("RPN: configure N_RPN_REGS and rpn_data[]",""); state = 1; } return 0; } if (mode == -2) { /* 5/2015 no cleanup for callable wgrib2, preserve registers if (state == 1) { for (i = 0; i < N_RPN_REGS; i++) { if (rpn_data[i]) { free (rpn_data[i]); rpn_data[i] = NULL; rpn_n[i] = 0; } } } state = 0; */ return 0; } // initialize stack if (data == NULL) fatal_error("rpn: decode failed",""); use_scale = 0; for (i = 0; i < STACK_SIZE; i++) stack[i] = NULL; top = push(-1, ndata, VECTOR, 0.0, data, NULL); if (mode == 98) fprintf(stderr,"RPN: arg=%s\n",arg1); // scan parameters p = arg1; while (sscanf(p,"%[^:]%n", string, &n) == 1) { if (mode == 98) fprintf(stderr, "RPN: top=%d (%s)", top, string); p = p + n; if (*p == ':') p++; // binary operators + - * / if (strcmp(string,"+") == 0) { if (mode == 98) fprintf(stderr," plus"); if (top <= 0) fatal_error("rpn: bad + expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = stack[j][i] + stack[top][i]; } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"-") == 0) { if (mode == 98) fprintf(stderr," minus"); if (top <= 0) fatal_error("rpn: bad - expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && 
DEFINED_VAL(stack[j][i])) { stack[j][i] = stack[j][i] - stack[top][i]; } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"*") == 0) { if (mode == 98) fprintf(stderr," times"); if (top <= 0) fatal_error("rpn: bad * expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = stack[j][i] * stack[top][i]; } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"/") == 0) { if (mode == 98) fprintf(stderr," div"); if (top <= 0) fatal_error("rpn: bad / expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i]) && (stack[top][i] != 0.0)) { stack[j][i] = stack[j][i] / stack[top][i]; } else stack[j][i] = UNDEFINED; } top--; } // merge: stack(top-1) = stack(top) (if defined) ; top--; else if (strcmp(string,"merge") == 0) { if (mode == 98) fprintf(stderr," merge"); if (top <= 0) fatal_error("rpn: bad merge expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[j][i] = stack[top][i]; } } top--; } // exc (swap top and top-1 stack entries) else if (strcmp(string,"exc") == 0 || strcmp(string,"swap") == 0) { if (mode == 98) fprintf(stderr," exchange"); if (top <= 0) fatal_error("rpn: bad exc/swap expression",""); j = top-1; for (i = 0; i < ndata; i++) { f = stack[j][i]; stack[j][i] = stack[top][i]; stack[top][i] = f; } } // pop: top--; else if (strcmp(string,"pop") == 0) { if (mode == 98) fprintf(stderr," pop"); if (top < 0) fatal_error("rpn: bad pop",""); top--; } // dup: top++; stack(top) = stack(top-1) else if (strcmp(string,"dup") == 0) { if (mode == 98) fprintf(stderr," dup"); top = push(top,ndata,VECTOR,0.0,stack[top],NULL); } // sqrt: stack(top) = sqrt(stack(top)) else if (strcmp(string,"sqrt") == 0) { if (mode == 98) fprintf(stderr," sqrt"); if (top < 0) fatal_error("rpn: bad sqrt expression",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && stack[top][i] >= 0.0) { stack[top][i] = sqrtf(stack[top][i]); } else stack[top][i] = UNDEFINED; } } // sq: x*x else if (strcmp(string,"sq") == 0) { if (mode == 98) fprintf(stderr," sq"); if (top < 0) fatal_error("rpn: bad sq expression",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] *= stack[top][i]; } } } // pow: x^y else if (strcmp(string,"pow") == 0) { if (top <= 0) fatal_error("rpn: bad pow expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = powf(stack[j][i], stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } // ln - natural log else if (strcmp(string,"ln") == 0) { if (top < 0) fatal_error("rpn: bad log expression",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && stack[top][i] > 0.0) { stack[top][i] = logf(stack[top][i]); } else stack[top][i] = UNDEFINED; } } // exp else if (strcmp(string,"exp") == 0) { if (top < 0) fatal_error("rpn: bad exp expression",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] = expf(stack[top][i]); } } } // abs else if (strcmp(string,"abs") == 0) { if (top < 0) fatal_error("rpn: bad abs expression",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { if (stack[top][i] < 0.0) stack[top][i] = -stack[top][i]; } } } // 1/x else if (strcmp(string,"1/x") == 0) { if (mode == 98) fprintf(stderr," 1/x"); if (top < 0) fatal_error("rpn: bad 1/x",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && stack[top][i] != 0.0) 
{ stack[top][i] = 1.0 / stack[top][i]; } else stack[top][i] = UNDEFINED; } } // floor else if (strcmp(string,"floor") == 0) { if (top < 0) fatal_error("rpn: bad floor",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] = floorf(stack[top][i]); } } } // ceil else if (strcmp(string,"ceil") == 0) { if (top < 0) fatal_error("rpn: bad ceil",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] = ceilf(stack[top][i]); } } } // sin cos tan asin acos atan else if (strcmp(string,"sin") == 0) { if (top < 0) fatal_error("rpn: bad sin",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] = sinf(stack[top][i]); } } } else if (strcmp(string,"cos") == 0) { if (top < 0) fatal_error("rpn: bad cos",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] = cosf(stack[top][i]); } } } else if (strcmp(string,"tan") == 0) { if (top < 0) fatal_error("rpn: bad tan",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] = tanf(stack[top][i]); } } } else if (strcmp(string,"asin") == 0) { if (top < 0) fatal_error("rpn: bad asin",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { if (fabsf(stack[top][i]) > 1.0) stack[top][i] = UNDEFINED; else stack[top][i] = asinf(stack[top][i]); } } } else if (strcmp(string,"acos") == 0) { if (top < 0) fatal_error("rpn: bad acos",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { if (fabsf(stack[top][i]) > 1.0) stack[top][i] = UNDEFINED; else stack[top][i] = acosf(stack[top][i]); } } } else if (strcmp(string,"atan") == 0) { if (top < 0) fatal_error("rpn: bad atan",""); for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { stack[top][i] = atanf(stack[top][i]); } } } else if (strcmp(string,"atan2") == 0) { if (top <= 0) fatal_error("rpn: bad atan2 expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = atan2f(stack[j][i], stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } // sto_N else if (string[0] == 's' && string[1] == 't' && string[2] == 'o' && string[3] == '_' && isdigit((unsigned char) string[4]) && (string[5] == 0 || (isdigit((unsigned char) string[5]) && string[6] == 0) )) { if (top < 0) fatal_error("rpn: sto",""); j = atoi(string+4); if (j >= N_RPN_REGS || j < 0) fatal_error("rpn: bad register number in %s", string); if (ndata != rpn_n[j]) { if (rpn_data[j]) free(rpn_data[j]); rpn_n[j] = ndata; rpn_data[j] = (float *) malloc(sizeof(float) * (size_t) ndata); if (rpn_data[j] == NULL) fatal_error("rpn: memory allocation failed in %s",string); } for (i=0; i < ndata; i++) { rpn_data[j][i] = stack[top][i]; } } // rcl_N else if (string[0] == 'r' && string[1] == 'c' && string[2] == 'l' && string[3] == '_' && isdigit((unsigned char) string[4]) && (string[5] == 0 || (isdigit((unsigned char) string[5]) && string[6] == 0) )) { j = atoi(string+4); if (j >= N_RPN_REGS || j < 0) fatal_error("rpn: bad register number in %s", string); if (rpn_n[j] != 0 && rpn_n[j] != ndata) fatal_error("rpn: rcl size mismatch",""); if (rpn_n[j] == 0) { // unused register are zero top = push(top,ndata,SCALAR,0.0,rpn_data[j],NULL); } else { top = push(top,ndata,VECTOR,0.0,rpn_data[j],NULL); } } // clr_N else if (string[0] == 'c' && string[1] == 'l' && string[2] == 'r' && string[3] == '_' && isdigit((unsigned char) string[4]) && (string[5] == 0 || (isdigit((unsigned char) string[5]) && string[6] == 0) )) { j = atoi(string+4); if (j >= 
N_RPN_REGS || j < 0) fatal_error("rpn: bad register number in %s", string); if (rpn_data[j]) { free(rpn_data[j]); rpn_data[j] = NULL; } rpn_n[j] = 0; } // rcl_lat else if (strcmp(string,"rcl_lat") == 0) { if (lat == NULL) fatal_error("rpn: rcl_lat: lat not defined",""); top = push(top,ndata,DBL_VEC,0.0,NULL,lat); } // rcl_lon else if (strcmp(string,"rcl_lon") == 0) { if (lon == NULL) fatal_error("rpn: rcl_lon: lon not defined",""); top = push(top,ndata,DBL_VEC,0.0,NULL,lon); } // max and min else if (strcmp(string,"max") == 0) { if (top <= 0) fatal_error("rpn: bad max expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { if (stack[j][i] < stack[top][i]) stack[j][i] = stack[top][i]; } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"min") == 0) { if (top <= 0) fatal_error("rpn: bad min expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { if (stack[j][i] > stack[top][i]) stack[j][i] = stack[top][i]; } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,">") == 0) { if (top <= 0) fatal_error("rpn: bad > expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = (stack[j][i] > stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,">=") == 0) { if (top <= 0) fatal_error("rpn: bad >= expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = (stack[j][i] >= stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"!=") == 0) { if (top <= 0) fatal_error("rpn: bad != expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = (stack[j][i] != stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"==") == 0) { if (top <= 0) fatal_error("rpn: bad == expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = (stack[j][i] == stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"<") == 0) { if (top <= 0) fatal_error("rpn: bad < expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = (stack[j][i] < stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"<=") == 0) { if (top <= 0) fatal_error("rpn: bad <= expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = (stack[j][i] <= stack[top][i]); } else stack[j][i] = UNDEFINED; } top--; } else if (strcmp(string,"mask") == 0) { if (top <= 0) fatal_error("rpn: bad mask expression",""); j = top-1; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { if (stack[top][i] == 0.0) stack[j][i] = UNDEFINED; } else stack[j][i] = UNDEFINED; } top--; } // yrev - like in GrADS : N <-> S else if (strcmp(string,"yrev") == 0) { if (top < 0) fatal_error("rpn: yrev needs field",""); get_nxny(sec, &nx, &ny, &npnts, &res, &scan); if (nx <= 0 || ny <= 0) fatal_error("rpn: yrev only on nx x ny grids",""); if ((scan >> 4) != 0 && (scan >> 4) != 4) fatal_error("rpn: yrev only appropriate for we:ns and we:sn grids",""); for (k = 0; k < ny/2; k++) { p1 = stack[top] + nx*k; p2 = stack[top] + nx*(ny-k-1); for (m 
= 0; m < nx; m++) { tmp = p1[m]; p1[m] = p2[m]; p2[m] = tmp; } } } // smth9 - like in GrADS smth9g - global field else if (strcmp(string,"smth9g") == 0) { if (mode == 98) fprintf(stderr," smth9"); if (top < 0) fatal_error("rpn: smth9 needs field",""); get_nxny(sec, &nx, &ny, &npnts, &res, &scan); if (nx <= 0 || ny <= 0) fatal_error("rpn: yrev only on nx x ny grids",""); if ((scan >> 4) != 0 && (scan >> 4) != 4) fatal_error("rpn: smth9 only appropriate for we:ns and we:sn grids",""); top = push(top,ndata,VECTOR,0.0,stack[top],NULL); for (m = 0; m < ny; m++) { for (i = 0; i < nx; i++) { wt = sum1 = 0.0; gbl_wt(&sum1, &wt, stack[top], i-1, m-1, nx, ny,0.3); gbl_wt(&sum1, &wt, stack[top], i , m-1, nx, ny,0.5); gbl_wt(&sum1, &wt, stack[top], i+1, m-1, nx, ny,0.3); gbl_wt(&sum1, &wt, stack[top], i-1, m , nx, ny,0.5); gbl_wt(&sum1, &wt, stack[top], i , m , nx, ny,1.0); gbl_wt(&sum1, &wt, stack[top], i+1, m , nx, ny,0.5); gbl_wt(&sum1, &wt, stack[top], i-1, m+1, nx, ny,0.3); gbl_wt(&sum1, &wt, stack[top], i , m+1, nx, ny,0.5); gbl_wt(&sum1, &wt, stack[top], i+1, m+1, nx, ny,0.3); stack[top-1][i + m*nx] = wt > 0.0 ? sum1/wt : UNDEFINED; } } top--; } // smth9r - like in GrADS smth9g - regional field else if (strcmp(string,"smth9r") == 0) { if (mode == 98) fprintf(stderr," smth9"); if (top < 0) fatal_error("rpn: smth9 needs field",""); get_nxny(sec, &nx, &ny, &npnts, &res, &scan); if (nx <= 0 || ny <= 0) fatal_error("rpn: yrev only on nx x ny grids",""); if ((scan >> 4) != 0 && (scan >> 4) != 4) fatal_error("rpn: smth9 only appropriate for we:ns and we:sn grids",""); top = push(top,ndata,VECTOR,0.0,stack[top],NULL); for (m = 0; m < ny; m++) { for (i = 0; i < nx; i++) { wt = sum1 = 0.0; reg_wt(&sum1, &wt, stack[top], i-1, m-1, nx, ny,0.3); reg_wt(&sum1, &wt, stack[top], i , m-1, nx, ny,0.5); reg_wt(&sum1, &wt, stack[top], i+1, m-1, nx, ny,0.3); reg_wt(&sum1, &wt, stack[top], i-1, m , nx, ny,0.5); reg_wt(&sum1, &wt, stack[top], i , m , nx, ny,1.0); reg_wt(&sum1, &wt, stack[top], i+1, m , nx, ny,0.5); reg_wt(&sum1, &wt, stack[top], i-1, m+1, nx, ny,0.3); reg_wt(&sum1, &wt, stack[top], i , m+1, nx, ny,0.5); reg_wt(&sum1, &wt, stack[top], i+1, m+1, nx, ny,0.3); stack[top-1][i + m*nx] = wt > 0.0 ? sum1/wt : UNDEFINED; } } top--; } else if (strcmp(string,"alt_x_scan") == 0) { if (top < 0) fatal_error("rpn: yrev needs field",""); get_nxny(sec, &nx, &ny, &npnts, &res, &scan); if (nx <= 0 || ny <= 0) fatal_error("rpn: alt_x_scan only works on nx x ny grids",""); for (k = 1; k < ny; k += 2) { p1 = stack[top] + nx*k; p2 = p1 + nx - 1; for (m = 0; m < nx/2; m++) { tmp = *p1; *p1++ = *p2; *p2-- = tmp; } } } else if (strcmp(string,"xave") == 0) { // x average the field if (top < 0) fatal_error("rpn: xave needs field",""); get_nxny(sec, &nx, &ny, &npnts, &res, &scan); if (nx <= 0 || ny <= 0) fatal_error("rpn: xave only works on nx x ny grids",""); for (k = 0; k < ndata; k += nx) { sum1 = 0.0; i = 0; for (m = 0; m < nx; m++) { if (DEFINED_VAL(stack[top][k+m])) { sum1 += stack[top][k+m]; i++; } } tmp = i ? 
sum1 / (double) i : 0.0;
                for (m = 0; m < nx; m++) {
                    if (DEFINED_VAL(stack[top][k+m])) {
                        stack[top][k+m] = tmp;
                    }
                }
            }
        }
        else if (strcmp(string,"xdev") == 0) {          // deviation from zonal mean
            if (top < 0) fatal_error("rpn: xdev needs field","");
            get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
            if (nx <= 0 || ny <= 0) fatal_error("rpn: xdev only works on nx x ny grids","");
            for (k = 0; k < ndata; k += nx) {
                sum1 = 0.0;
                i = 0;
                for (m = 0; m < nx; m++) {
                    if (DEFINED_VAL(stack[top][k+m])) {
                        sum1 += stack[top][k+m];
                        i++;
                    }
                }
                tmp = i ? sum1 / (double) i : 0.0;
                for (m = 0; m < nx; m++) {
                    if (DEFINED_VAL(stack[top][k+m])) {
                        stack[top][k+m] -= tmp;
                    }
                }
            }
        }

        // change to rcl-data, rcl-lat, rcl-lon
        // rcl: stack(++top) = data
        else if (strcmp(string,"rcl") == 0) {
            top = push(top,ndata,VECTOR,0.0,data,NULL);
        }
        // sto: data = stack(top)
        else if (strcmp(string,"sto") == 0) {
            if (top < 0) fatal_error("rpn: bad sto","");
            for (i=0; i < ndata; i++) {
                data[i] = stack[top][i];
            }
        }
        // clr: empty the stack
        else if (strcmp(string,"clr") == 0) {
            top = -1;
        }
        // pi: stack(++top) = pi
        else if (strcmp(string,"pi") == 0) {
            top = push(top,ndata,SCALAR,(float) M_PI,NULL,NULL);
        }
        // rand: stack(++top) = random number from 0..1
        // note: rand() is not thread safe, so do not use OpenMP here
        // srand(seed) could be called first to set up the seed
        // since srand is not called, the seed is 1
        else if (strcmp(string,"rand") == 0) {
            if (mode == 98) fprintf(stderr," rand");
            top = push(top,ndata,SCALAR,(float) 0.0f,NULL,NULL);
            for (i = 0; i < ndata; i++) {
                stack[top][i] = (double) rand() / (double) RAND_MAX;
            }
        }
        else if (strcmp(string,"days_in_ref_month") == 0) {
            reftime(sec, &year, &month, &day, &hour, &minute, &second);
            i = num_days_in_month(year, month);
            top = push(top,ndata,SCALAR,(float) i,NULL,NULL);
        }
        else if (strcmp(string,"days_in_verf_month") == 0) {
            verftime(sec, &year, &month, &day, &hour, &minute, &second);
            i = num_days_in_month(year, month);
            top = push(top,ndata,SCALAR,(float) i,NULL,NULL);
        }

        // print operations .. don't affect the stack

        else if (strcmp(string,"print_max") == 0) {
            if (top < 0) fatal_error("rpn: bad print_max expression","");
            flag = 0;
            tmp = 0.0;
            for (i = 0; i < ndata; i++) {
                if (DEFINED_VAL(stack[top][i])) {
                    if (flag) tmp = (tmp < stack[top][i]) ? stack[top][i] : tmp;
                    else {
                        flag = 1;
                        tmp = stack[top][i];
                    }
                }
            }
            sprintf(inv_out,"%srpn_max=%g",item_deliminator,tmp);
            inv_out += strlen(inv_out);
        }
        else if (strcmp(string,"print_min") == 0) {
            if (top < 0) fatal_error("rpn: bad print_min expression","");
            flag = 0;
            tmp = 0.0;
            for (i = 0; i < ndata; i++) {
                if (DEFINED_VAL(stack[top][i])) {
                    if (flag) tmp = (tmp > stack[top][i]) ?
stack[top][i] : tmp; else { flag = 1; tmp = stack[top][i]; } } } sprintf(inv_out,"%srpn_min=%g",item_deliminator,tmp); inv_out += strlen(inv_out); } // print_diff: prints out cosine weighted difference (push - top) else if (strcmp(string,"print_diff") == 0) { if (top <= 0) fatal_error("rpn: print_rms needs two fields",""); j = top - 1; last_lat = 0; cos_lat = 1.0; sum1 = wt = 0.0; if (lat != NULL) { for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { if (last_lat != lat[i]) { cos_lat = cos(lat[i]*M_PI/180.0); last_lat = lat[i]; } sum1 += (stack[j][i] - stack[top][i]) * cos_lat; wt += cos_lat; } } } else { #pragma omp parallel for private(i) reduction(+:wt,sum1) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { sum1 += (stack[j][i] - stack[top][i]); wt += 1.0; } } } if (wt != 0.0) sprintf(inv_out,"%srpn_diff=%g",item_deliminator,sum1/wt); else sprintf(inv_out,"%srpn_diff=undefined",item_deliminator); inv_out += strlen(inv_out); } // print_rms: prints out cosine weighted RMS else if (strcmp(string,"print_rms") == 0) { if (top <= 0) fatal_error("rpn: print_rms needs two fields",""); j = top - 1; last_lat = 0; cos_lat = 1.0; sum1 = wt = 0.0; if (lat != NULL) { for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { if (last_lat != lat[i]) { cos_lat = cos(lat[i]*M_PI/180.0); last_lat = lat[i]; } sum1 += (stack[top][i] - stack[j][i]) * (stack[top][i] - stack[j][i]) * cos_lat; wt += cos_lat; } } } else { #pragma omp parallel for private(i) reduction(+:wt,sum1) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { sum1 += (stack[top][i] - stack[j][i]) * (stack[top][i] - stack[j][i]); wt += 1.0; } } } if (wt != 0.0) sprintf(inv_out,"%srpn_rms=%g",item_deliminator,sqrt(sum1/wt)); else sprintf(inv_out,"%srpn_rms=undefined",item_deliminator); inv_out += strlen(inv_out); } // print_ave: prints out cosine weighted ave else if (strcmp(string,"print_ave") == 0) { if (top < 0) fatal_error("rpn: bad print_ave expression",""); // if (lat == NULL) fatal_error("rpn: print_ave .. no latitudes defined",""); last_lat = 0; cos_lat = 1.0; sum1 = wt = 0.0; if (lat != NULL) { for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { if (last_lat != lat[i]) { cos_lat = cos(lat[i]*M_PI/180.0); last_lat = lat[i]; } sum1 += stack[top][i] * cos_lat; wt += cos_lat; } } } else { #pragma omp parallel for private(i) reduction(+:wt,sum1) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i])) { sum1 += stack[top][i]; wt += 1.0; } } } if (wt != 0.0) sprintf(inv_out,"%srpn_ave=%g",item_deliminator,sum1/wt); else sprintf(inv_out,"%srpn_ave=undefined",item_deliminator); inv_out += strlen(inv_out); } // print_wt_ave: prints weighted ave, X=data, Y=weights else if (strcmp(string,"print_wt_ave") == 0) { if (top <= 0) fatal_error("rpn: print_wt_ave needs two fields",""); j = top - 1; sum1 = sum2 = 0.0; // find mean values for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { sum1 += stack[j][i]*stack[top][i]; sum2 += stack[top][i]; } } if (sum2 != 0.0) sum1 = sum1 / sum2; sprintf(inv_out,"%srpn_wt_ave=%g",item_deliminator,sum1); } // print_corr: prints cosine(lat) weighted spatial correlation else if (strcmp(string,"print_corr") == 0) { if (top <= 0) fatal_error("rpn: print_corr needs two fields",""); // if (lat == NULL) fatal_error("rpn: print_corr .. 
no latitudes defined",""); j = top - 1; sum1 = sum2 = wt = 0.0; last_lat = 0; cos_lat = 1.0; sq1 = sq2 = sq12 = 0.0; if (lat != NULL) { // find mean values for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { if (last_lat != lat[i]) { cos_lat = cos(lat[i]*M_PI/180.0); last_lat = lat[i]; } sum1 += stack[top][i] * cos_lat; sum2 += stack[j][i] * cos_lat; wt += cos_lat; } } sum1 = sum1 / wt; sum2 = sum2 / wt; for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { if (last_lat != lat[i]) { cos_lat = cos(lat[i]*M_PI/180.0); last_lat = lat[i]; } sq1 += (stack[top][i]-sum1)*(stack[top][i]-sum1)*cos_lat; sq2 += (stack[j][i]-sum2)*(stack[j][i]-sum2)*cos_lat; sq12 += (stack[top][i]-sum1)*(stack[j][i]-sum2)*cos_lat; } } } else { // find mean values #pragma omp parallel for private(i) reduction(+:wt,sum1,sum2) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { sum1 += stack[top][i]; sum2 += stack[j][i]; wt += 1.0; } } sum1 = sum1 / wt; sum2 = sum2 / wt; #pragma omp parallel for private(i) reduction(+:sq1,sq2,sq12) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { sq1 += (stack[top][i]-sum1)*(stack[top][i]-sum1); sq2 += (stack[j][i]-sum2)*(stack[j][i]-sum2); sq12 += (stack[top][i]-sum1)*(stack[j][i]-sum2); } } } sq1 = sq1 / wt; sq2 = sq2 / wt; sq12 = sq12 / wt; if (sq1*sq2 == 0.0) sprintf(inv_out,"%srpn_corr=%g",item_deliminator,1.0); else sprintf(inv_out,"%srpn_corr=%g",item_deliminator, sq12/sqrt(sq1*sq2)); inv_out += strlen(inv_out); } // number: stack(++top) = number else if (string[0] == '+' || string[0] == '-' || string[0] == '.' || isdigit((unsigned char) string[0])) { f = atof(string); top = push(top,ndata,SCALAR,f,NULL,NULL); if (mode == 98) fprintf(stderr," constant=%f", f); } else fatal_error("rpn: unidentified symbol %s", string); if (mode == 98) fprintf(stderr," top=%d\n", top); } if (*p != 0) fatal_error("-rpn didn't find operatore or value before %s",p); if (top >= 0) { for (i = 0; i < ndata; i++) { data[i] = stack[top][i]; } } else fatal_error("rpn: stack empty",""); // free stack for (i = 0; i < STACK_SIZE; i++) free(stack[i]); return 0; } int push(int top, unsigned int ndata, int type, float f, float *ff, double *d) { unsigned int i; if (++top == STACK_SIZE) fatal_error_i("rpn: push: stack overflow %d",top); if (stack[top] == NULL) { stack[top] = (float *) malloc(sizeof(float) * (size_t) ndata); if (stack[top] == NULL) fatal_error("rpn: push: memory allocation",""); } if (type == SCALAR) { for (i = 0; i < ndata; i++) stack[top][i] = f; } else if (type == VECTOR) { for (i = 0; i < ndata; i++) stack[top][i] = ff[i]; } else if (type == DBL_VEC) { for (i = 0; i < ndata; i++) stack[top][i] = (float) d[i]; } return top; } /* * HEADER:100:if_reg:misc:1:if rpn registers defined, X = A, A:B, A:B:C, etc A = register number */ int f_if_reg(ARG1) { int i, j, *list; const char *p; if (mode == -1) { // figure out the number of arguments i = 1; p = arg1; while (*p) { if (*p++ == ':') i++; } *local = list = (int *) calloc(i+1, sizeof (int)); if (list == NULL) fatal_error("if_reg: memory allocation failed",""); list[0] = i; p = arg1; for (j = 1; j <= i; j++) { list[j] = atoi(p); if (list[j] >= N_RPN_REGS || list[j] < 0) fatal_error_i("if_reg: bad register %d", list[j]); while (isdigit((unsigned char) *p)) p++; if (*p == ':') p++; } } else if (mode == -2) { list = (int *) *local; free(list); } else if (mode >= 0) { list = (int *) *local; i = 
list[0]; match_flag = 0; for (j=1; j <= i; j++) { if (rpn_n[list[j]] == 0) match_flag = 1; } } return 0; } /* * HEADER:100:rpn_rcl:misc:1:data = register X .. same as -rpn rcl_X .. no geolocation calc needed */ int f_rpn_rcl(ARG1) { int reg; if (mode == -1) { decode = 1; } else if (mode >= 0) { reg = atoi(arg1); if (reg < 0 || reg >= N_RPN_REGS) fatal_error_i("rpn_rcl: bad register %d", reg); if (ndata != rpn_n[reg]) fatal_error("rpn_rcl: size mismatch",""); use_scale = 0; memcpy(data, rpn_data[reg], ndata * sizeof(float)); } return 0; } /* * HEADER:100:rpn_sto:misc:1:register X = data.. same as -rpn sto_X .. no geolocation calc needed */ int f_rpn_sto(ARG1) { int reg; if (mode == -1) { decode = 1; } else if (mode >= 0) { reg = atoi(arg1); if (reg < 0 || reg >= N_RPN_REGS) fatal_error_i("rpn_sto: bad register %d", reg); if (ndata != rpn_n[reg]) { if (rpn_n[reg] != 0) free(rpn_data[reg]); rpn_data[reg] = (float *) malloc(sizeof(float) * (size_t) ndata); if (rpn_data[reg] == NULL) { rpn_n[reg] = 0; fatal_error("rpn_sto: memory allocation",""); } rpn_n[reg] = ndata; } memcpy(rpn_data[reg], data, ndata * sizeof(float)); } return 0; } static void gbl_wt(double *sum, double *wt, float *data, int i, int j, int nx, int ny, double wt0) { float t; i = (i == -1) ? nx-1 : i; i = (i == nx) ? 0 : i; if (i < 0 || i >= nx || j < 0 || j >= ny) return; t = data[i + j*nx]; if (UNDEFINED_VAL(t)) return; *wt = *wt + wt0; *sum = *sum + t*wt0; return; } static void reg_wt(double *sum, double *wt, float *data, int i, int j, int nx, int ny, double wt0) { float t; if (i < 0 || i >= nx || j < 0 || j >= ny) return; t = data[i + j*nx]; if (UNDEFINED_VAL(t)) return; *wt = *wt + wt0; *sum = *sum + t*wt0; return; } /* * routines to allow code to get or set various RPN registers * * for example: read field, save in reg_0 * allocate array to match read grid dimensions * copy reg_0 to array */ size_t wgrib2_get_reg_size(int reg) { if (reg < 0 ||reg >= N_RPN_REGS) return 0; return rpn_n[reg]; } int wgrib2_get_reg_data(float *data, size_t size, int reg) { if (reg < 0 || reg >= N_RPN_REGS) return 1; if (rpn_n[reg] != size) return 2; memcpy(data, rpn_data[reg], sizeof(float) * (size_t) size); return 0; } int wgrib2_set_reg(float *data, size_t size, int reg) { if (reg < 0 || reg >= N_RPN_REGS) return 1; if (rpn_n[reg] != size) { if (rpn_data[reg] != NULL) free(rpn_data[reg]); rpn_n[reg] = 0; rpn_data[reg] = (float *) malloc(sizeof(float) * (size_t) size); if (rpn_data[reg] == NULL) return 2; rpn_n[reg] = size; } memcpy(rpn_data[reg], data, size * sizeof(float)); return 0; }
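The register-access helpers above (wgrib2_get_reg_size, wgrib2_get_reg_data, wgrib2_set_reg) follow the usage pattern sketched in their comment block: query the size, allocate a matching array, then copy the register out. The caller below is a minimal illustrative sketch, not part of the wgrib2 source; it assumes register 0 was already filled (for example by an earlier "-rpn sto_0"), and the function name read_reg0_example is invented for the illustration.

/* illustrative sketch of the get-register flow; not part of wgrib2 */
#include <stdio.h>
#include <stdlib.h>

size_t wgrib2_get_reg_size(int reg);                        /* defined above */
int wgrib2_get_reg_data(float *data, size_t size, int reg); /* defined above */

int read_reg0_example(void) {
    size_t n = wgrib2_get_reg_size(0);          /* returns 0 if register unused */
    if (n == 0) return 1;
    float *grid = (float *) malloc(sizeof(float) * n);      /* match grid size */
    if (grid == NULL) return 1;
    if (wgrib2_get_reg_data(grid, n, 0) != 0) { free(grid); return 1; }
    printf("register 0 holds %zu values, first = %g\n", n, grid[0]);
    free(grid);
    return 0;
}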
control_tool.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>

int main() {
#pragma omp parallel num_threads(1)
  {
    print_frame_from_outlined_fn(1);
    print_frame(0);
    omp_control_tool(omp_control_tool_flush, 1, NULL);
    print_current_address(0);
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_control_tool'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address({{.}})=[[EXIT_FRAME:0x[0-f]*]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER_FRAME:0x[0-f]*]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_control_tool: command=3, modifier=1, arg=[[NULL]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]*]], current_task_frame.exit=[[EXIT_FRAME]], current_task_frame.reenter=[[REENTER_FRAME]]
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]

  return 0;
}
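The omp_control_tool() call exercised by this test is part of the base OpenMP 5.x API rather than the OMPT test harness, so it can also be called from ordinary application code. A standalone sketch (mine, not part of the LLVM test suite) that runs even with no tool attached; in that case the call is effectively a no-op and, per the OpenMP specification, the runtime reports that no tool is active:

/* illustrative sketch; with no OMPT tool attached, omp_control_tool
   typically returns omp_control_tool_notool */
#include <omp.h>
#include <stdio.h>

int main(void) {
    int rc = omp_control_tool(omp_control_tool_flush, /* modifier */ 1, NULL);
    printf("omp_control_tool returned %d\n", rc);
    return 0;
}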
jacobi.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>     /* memcpy() */
#include <math.h>       /* sqrt() */
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>

#define REAL float

static double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

/************************************************************
 * program to solve a finite difference
 * discretization of Helmholtz equation :
 * (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using Jacobi iterative method.
 *
 * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 *
 * This C version program was translated by
 * Chunhua Liao, University of Houston, Jan, 2005
 *
 * Directives are used in this code to achieve parallelism.
 * All do loops are parallelized with default 'static' scheduling.
 *
 * Input : n - grid dimension in x direction
 *         m - grid dimension in y direction
 *         alpha - Helmholtz constant (always greater than 0.0)
 *         tol - error tolerance for iterative solver
 *         relax - Successive over-relaxation parameter
 *         mits - Maximum iterations for iterative solver
 *
 * On output
 *       : u(n,m) - Dependent variable (solutions)
 *       : f(n,m) - Right hand side function
 *************************************************************/

#define DEFAULT_DIMSIZE 256

void print_array(char *title, char *name, REAL *A, int n, int m) {
    printf("%s:\n", title);
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data
 * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
 ******************************************************/
void initialize(int n, int m, REAL alpha, REAL *dx, REAL *dy, REAL *u_p, REAL *f_p) {
    int i;
    int j;
    int xx;
    int yy;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    //double PI=3.1415926;
    *dx = (2.0 / (n - 1));
    *dy = (2.0 / (m - 1));
    /* Initialize initial condition and RHS */
    //#pragma omp parallel for private(xx,yy,j,i)
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = ((int) (-1.0 + (*dx * (i - 1))));
            yy = ((int) (-1.0 + (*dy * (j - 1))));
            u[i][j] = 0.0;
            f[i][j] = (((((-1.0 * alpha) * (1.0 - (xx * xx))) * (1.0 - (yy * yy)))
                      - (2.0 * (1.0 - (xx * xx)))) - (2.0 * (1.0 - (yy * yy))));
        }
}

/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 * implicit none
 ************************************************************
 * Checks error between numerical and exact solution
 ************************************************************/
void error_check(int n, int m, REAL alpha, REAL dx, REAL dy, REAL *u_p, REAL *f_p) {
    int i;
    int j;
    REAL xx;
    REAL yy;
    REAL temp;
    REAL error;
    error = 0.0;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = (-1.0 + (dx * (i - 1)));
            yy = (-1.0 + (dy * (j - 1)));
            temp = (u[i][j] - ((1.0 - (xx * xx)) * (1.0 - (yy * yy))));
            error = (error + (temp * temp));
        }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}

void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);

int main(int argc, char *argv[]) {
    int n = DEFAULT_DIMSIZE;
    int m = DEFAULT_DIMSIZE;
    REAL alpha = 0.0543;
    REAL tol = 0.0000000001;
    REAL relax = 1.0;
    int mits = 5000;

    /*fprintf(stderr, "Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]\n");
    fprintf(stderr, "\tn - grid dimension in x direction, default: %d\n", n);
    fprintf(stderr, "\tm - grid dimension in y direction, default: n if provided or %d\n", m);
    fprintf(stderr, "\talpha - Helmholtz constant (always greater than 0.0), default: %g\n", alpha);
    fprintf(stderr, "\ttol - error tolerance for iterative solver, default: %g\n", tol);
    fprintf(stderr, "\trelax - Successive over-relaxation parameter, default: %g\n", relax);
    fprintf(stderr, "\tmits - Maximum iterations for iterative solver, default: %d\n", mits);*/
    if (argc == 2) {
        sscanf(argv[1], "%d", &n);
        m = n;
    }
    else if (argc == 3) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
    }
    else if (argc == 4) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
    }
    else if (argc == 5) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
    }
    else if (argc == 6) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
    }
    else if (argc == 7) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
        sscanf(argv[6], "%d", &mits);
    }
    else {
        /* the rest of arg ignored */
    }
    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");

    /** init the array */
    REAL *u = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *uomp = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *f = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL dx; /* grid spacing in x direction */
    REAL dy; /* grid spacing in y direction */
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(REAL) * n * m);

    double elapsed = read_timer_ms();
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("seq elapsed time(ms): %4f\n", elapsed);
    double mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);

    puts("================");
    elapsed = read_timer_ms();
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
    elapsed = read_timer_ms() - elapsed;
    printf("OpenMP elapsed time(ms): %4f\n", elapsed);
    mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    printf("MFLOPS: %12.6g\n", mflops);

    //print_array("Sequential Run", "u",(REAL*)u, n, m);
    error_check(n, m, alpha, dx, dy, u, f);

    free(u);
    free(f);
    free(uomp);
    return 0;
}

/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
 ******************************************************************
 * Subroutine HelmholtzJ
 * Solves Poisson equation on rectangular grid assuming :
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlet boundary conditions
 *
 * Jacobi method is used in this routine
 *
 * Input : n,m    Number of grid points in the X/Y directions
 *         dx,dy  Grid spacing in the X/Y directions
 *         alpha  Helmholtz eqn. coefficient
 *         omega  Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol    Tolerance for iterative solver
 *         mits   Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error;
    REAL ax;
    REAL ay;
    REAL b;
    REAL resid;
    REAL uold[n][m];
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    /*
     * Initialize coefficients */
    /* X-direction coef */
    ax = (1.0 / (dx * dx));
    /* Y-direction coef */
    ay = (1.0 / (dy * dy));
    /* Central coeff */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);
    error = (10.0 * tol);
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        for (i = 1; i < (n - 1); i++)
            for (j = 1; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                       + ay * (uold[i][j - 1] + uold[i][j + 1])
                       + b * uold[i][j] - f[i][j]) / b;
                //printf("i: %d, j: %d, resid: %f\n", i, j, resid);
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        /* Error check */
        //if (k % 500 == 0)
        //    printf("Finished %d iteration with error: %g\n", k, error);
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
}

void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error;
    REAL ax;
    REAL ay;
    REAL b;
    REAL resid;
    REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL (*uold)[m] = (REAL (*)[m]) tmp;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    /*
     * Initialize coefficients */
    /* X-direction coef */
    ax = (1.0 / (dx * dx));
    /* Y-direction coef */
    ay = (1.0 / (dy * dy));
    /* Central coeff */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);
    error = (10.0 * tol);
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        //printf("===================== iteration %d ===========================\n", k);
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
#pragma omp simd
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        for (i = 1; i < (n - 1); i++)
#pragma omp simd reduction(+:resid,error)
            for (j = 1; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                       + ay * (uold[i][j - 1] + uold[i][j + 1])
                       + b * uold[i][j] - f[i][j]) / b;
                //printf("i: %d, j: %d, resid: %f\n", i, j, resid);
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        /* Error check */
        //if (k % 500 == 0)
        //    printf("Finished %d iteration with error: %g\n", k, error);
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
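For reference, the update that both jacobi_seq and jacobi_omp apply at each interior point $(i,j)$ is, in the notation of the code, $r_{i,j} = \left( a_x (u^{old}_{i-1,j} + u^{old}_{i+1,j}) + a_y (u^{old}_{i,j-1} + u^{old}_{i,j+1}) + b\,u^{old}_{i,j} - f_{i,j} \right) / b$ followed by $u_{i,j} = u^{old}_{i,j} - \omega\, r_{i,j}$, with $a_x = 1/dx^2$, $a_y = 1/dy^2$, and $b = -2/dx^2 - 2/dy^2 - \alpha$. The default relax value $\omega = 1.0$ gives plain Jacobi iteration, and the residual printed after each sweep is $\sqrt{\sum_{i,j} r_{i,j}^2}\,/\,(n\,m)$.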
GB_unop__identity_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB_unop_apply__identity_uint32_int64
// op(A') function: GB_unop_tran__identity_uint32_int64

// C type:   uint32_t
// A type:   int64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int64_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = (uint32_t) aij ;   \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint32_int64
(
    uint32_t *Cx,                   // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
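Because the kernel above is assembled entirely from macros, the per-element work is easy to miss. Hand-expanding GB_GETA, GB_CAST, and GB_OP for the dense (non-bitmap) case yields the equivalent of the sketch below; this is illustrative only, not part of the generated file, and the function name identity_uint32_from_int64 is invented for the illustration.

#include <stdint.h>

/* hand-expanded equivalent of the dense-case loop in
   GB_unop_apply__identity_uint32_int64 */
static void identity_uint32_from_int64(uint32_t *Cx, const int64_t *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;          /* GB_GETA: aij = Ax [pA] */
        uint32_t z = (uint32_t) aij ;   /* GB_CAST: typecast int64_t -> uint32_t */
        Cx [p] = z ;                    /* GB_OP is the identity: cij = aij */
    }
}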
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" #include "magick/xwindow-private.h" /* Constant declaration. 
*/ const char BackgroundColor[] = "#ffffff", /* white */ BorderColor[] = "#dfdfdf", /* gray */ DefaultTileFrame[] = "15x15+3+3", DefaultTileGeometry[] = "120x120+4+3>", DefaultTileLabel[] = "%f\n%G\n%b", ForegroundColor[] = "#000", /* black */ LoadImageTag[] = "Load/Image", LoadImagesTag[] = "Load/Images", MatteColor[] = "#bdbdbd", /* gray */ PSDensityGeometry[] = "72.0x72.0", PSPageGeometry[] = "612x792", SaveImageTag[] = "Save/Image", SaveImagesTag[] = "Save/Images", TransparentColor[] = "#00000000"; /* transparent black */ const double DefaultResolution = 72.0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireMagickMemory(sizeof(*image)); if (image == (Image *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image,0,sizeof(*image)); /* Initialize Image structure. */ (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=RGBColorspace; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; image->blur=1.0; GetExceptionInfo(&image->exception); (void) QueryColorDatabase(BackgroundColor,&image->background_color, &image->exception); (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception); (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception); (void) QueryColorDatabase(TransparentColor,&image->transparent_color, &image->exception); image->x_resolution=DefaultResolution; image->y_resolution=DefaultResolution; image->units=PixelsPerInchResolution; GetTimerInfo(&image->timer); image->ping=MagickFalse; image->cache=AcquirePixelCache(0); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AllocateSemaphoreInfo(); image->signature=MagickSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MaxTextExtent); (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->matte_color=image_info->matte_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); (void) SetImageVirtualPixelMethod(image,image_info->virtual_pixel_method); (void) SyncImageSettings(image_info,image); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info)); if (image_info == (ImageInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image) { /* Allocate image structure. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MaxTextExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MaxTextExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now effects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view, *image_view; const Image *image; Image *append_image; MagickBooleanType matte, proceed, status; MagickOffsetType n; RectangleInfo geometry; register const Image *next; size_t height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended area. 
*/ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=images; matte=image->matte; number_images=1; width=image->columns; height=image->rows; next=GetNextImageInList(image); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->matte != MagickFalse) matte=MagickTrue; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. */ append_image=CloneImage(image,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(append_image,DirectClass) == MagickFalse) { InheritException(exception,&append_image->exception); append_image=DestroyImage(append_image); return((Image *) NULL); } append_image->matte=matte; (void) SetImageBackgroundColor(append_image); status=MagickTrue; x_offset=0; y_offset=0; append_view=AcquireCacheView(append_image); for (n=0; n < (MagickOffsetType) number_images; n++) { SetGeometry(append_image,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict append_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); append_indexes=GetCacheViewAuthenticIndexQueue(append_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if ((image->colorspace == CMYKColorspace) && (append_image->colorspace == CMYKColorspace)) SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x)); p++; q++; } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); proceed=SetImageProgress(image,AppendImageTag,n,number_images); if (proceed == MagickFalse) break; if (stack == MagickFalse) { x_offset+=(ssize_t) image->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) image->rows; } image=GetNextImageInList(image); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns if no exceptions are found in the image 
% sequence, otherwise it determines the most severe exception and reports % it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); GetImageException(image,exception); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ MagickExport MagickBooleanType ClipImage(Image *image) { return(ClipImagePath(image,"#1",MagickTrue)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(&image->exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent); clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask); if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse); (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageClipMask(image,clip_mask); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new 
image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { Image *clone_image; MagickRealType scale; size_t length; /* Clone the image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image)); if (clone_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickSignature; clone_image->storage_class=image->storage_class; clone_image->channels=image->channels; clone_image->colorspace=image->colorspace; clone_image->matte=image->matte; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; if (image->colormap != (PixelPacket *) NULL) { /* Allocate and copy the image colormap. 
*/ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelPacket *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickMemory(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); GetExceptionInfo(&clone_image->exception); InheritException(&clone_image->exception,&image->exception); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MaxTextExtent); (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent); (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); clone_image->clip_mask=NewImageList(); clone_image->mask=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AllocateSemaphoreInfo(); if ((columns == 0) && (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); if (image->clip_mask != (Image *) NULL) clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } if ((columns == image->columns) && (rows == image->rows)) { if (image->clip_mask != (Image *) NULL) clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); } scale=(MagickRealType) columns/(MagickRealType) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=(MagickRealType) rows/(MagickRealType) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->columns=columns; clone_image->rows=rows; clone_image->cache=ClonePixelCache(image->cache); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. 
If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; if (image_info->size != (char *) NULL) (void) CloneString(&clone_info->size,image_info->size); if (image_info->extract != (char *) NULL) (void) CloneString(&clone_info->extract,image_info->extract); if (image_info->scenes != (char *) NULL) (void) CloneString(&clone_info->scenes,image_info->scenes); if (image_info->page != (char *) NULL) (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; if (image_info->sampling_factor != (char *) NULL) (void) CloneString(&clone_info->sampling_factor, image_info->sampling_factor); if (image_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if (image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->pen=image_info->pen; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->matte_color=image_info->matte_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colors=image_info->colors; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->preview_type=image_info->preview_type; clone_info->group=image_info->group; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; if (image_info->view != (char *) NULL) (void) CloneString(&clone_info->view,image_info->view); if (image_info->authenticate != (char *) NULL) (void) CloneString(&clone_info->authenticate,image_info->authenticate); (void) CloneImageOptions(clone_info,image_info); clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->virtual_pixel_method=image_info->virtual_pixel_method; (void) 
CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent); (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent); (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MaxTextExtent); clone_info->subimage=image_info->scene; /* deprecated */ clone_info->subrange=image_info->number_scenes; /* deprecated */ clone_info->channel=image_info->channel; clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; const Image *next; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError,"ImagesAreNotTheSameSize"); } combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse) { InheritException(exception,&combine_image->exception); combine_image=DestroyImage(combine_image); return((Image *) NULL); } if ((channel & OpacityChannel) != 0) combine_image->matte=MagickTrue; (void) SetImageBackgroundColor(combine_image); /* Combine images. 
*/ status=MagickTrue; progress=0; combine_view=AcquireCacheView(combine_image); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; PixelPacket *pixels; register const PixelPacket *restrict p; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } next=image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q,PixelIntensityToQuantum(p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q,PixelIntensityToQuantum(p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q,PixelIntensityToQuantum(p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelOpacity(q,PixelIntensityToQuantum(p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket *indexes; image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(combine_view); for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes+x,PixelIntensityToQuantum(p)); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CombineImageTag,progress++, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. 
% % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. */ DestroyImagePixels(image); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelPacket *) NULL) image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info*) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); DestroyBlob(image); (void) DestroyExceptionInfo(&image->exception); if (image->semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&image->semaphore); image->signature=(~MagickSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
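%
%  A typical lifetime pairing, as an illustrative sketch:
%
%    ImageInfo
%      *image_info;
%
%    image_info=AcquireImageInfo();
%    ...
%    image_info=DestroyImageInfo(image_info);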
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(image_info->authenticate);
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  image_info->signature=(~MagickSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  (void) DetachBlob(image->blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e A l p h a C h a n n e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
%  not activated.  That is, the image is RGB rather than RGBA or CMYK rather
%  than CMYKA.
%
%  The format of the GetImageAlphaChannel method is:
%
%      MagickBooleanType GetImageAlphaChannel(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
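%
%  For example (an illustrative caller-side sketch; `canvas' is assumed to
%  be another image of matching size), composite only when transparency is
%  present:
%
%    if (GetImageAlphaChannel(image) != MagickFalse)
%      (void) CompositeImage(canvas,OverCompositeOp,image,0,0);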
% */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); return(image->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C l i p M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageClipMask() returns the clip path associated with the image. % % The format of the GetImageClipMask method is: % % Image *GetImageClipMask(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *GetImageClipMask(const Image *image, ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (image->clip_mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->clip_mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageException() traverses an image sequence and returns any % error more severe than noted by the exception parameter. % % The format of the GetImageException method is: % % void GetImageException(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to a list of one or more images. % % o exception: return the highest severity exception. % */ MagickExport void GetImageException(Image *image,ExceptionInfo *exception) { register Image *next; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->exception.severity == UndefinedException) continue; if (next->exception.severity > exception->severity) InheritException(exception,&next->exception); next->exception.severity=UndefinedException; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfo() initializes image_info to default values. % % The format of the GetImageInfo method is: % % void GetImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport void GetImageInfo(ImageInfo *image_info) { const char *synchronize; ExceptionInfo *exception; /* File and image dimension members. 
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info != (ImageInfo *) NULL); (void) ResetMagickMemory(image_info,0,sizeof(*image_info)); image_info->adjoin=MagickTrue; image_info->interlace=NoInterlace; image_info->channel=DefaultChannels; image_info->quality=UndefinedCompressionQuality; image_info->antialias=MagickTrue; image_info->dither=MagickTrue; synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) image_info->synchronize=IsMagickTrue(synchronize); exception=AcquireExceptionInfo(); (void) QueryColorDatabase(BackgroundColor,&image_info->background_color, exception); (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception); (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception); (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color, exception); exception=DestroyExceptionInfo(exception); image_info->debug=IsEventLogging(); image_info->signature=MagickSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfoFile() returns the image info file member. % % The format of the GetImageInfoFile method is: % % FILE *GetImageInfoFile(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. % % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (image->mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannels() returns the number of pixel channels associated with the % specified image. % % The format of the GetChannels method is: % % size_t GetImageChannels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport size_t GetImageChannels(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(image->channels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageReferenceCount() returns the image reference count. 
% % The format of the GetReferenceCount method is: % % ssize_t GetImageReferenceCount(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ssize_t GetImageReferenceCount(Image *image) { ssize_t reference_count; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); LockSemaphoreInfo(image->semaphore); reference_count=image->reference_count; UnlockSemaphoreInfo(image->semaphore); return(reference_count); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageVirtualPixelMethod() gets the "virtual pixels" method for the % image. A virtual pixel is any pixel access that is outside the boundaries % of the image cache. % % The format of the GetImageVirtualPixelMethod() method is: % % VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(GetPixelCacheVirtualMethod(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageFilename() interprets embedded characters in an image filename. % The filename length is returned. % % The format of the InterpretImageFilename method is: % % size_t InterpretImageFilename(const ImageInfo *image_info, % Image *image,const char *format,int value,char *filename) % % A description of each parameter follows. % % o image_info: the image info.. % % o image: the image. % % o format: A filename describing the format to use to write the numeric % argument. Only the first numeric format identifier is replaced. % % o value: Numeric value to substitute into format filename. % % o filename: return the formatted filename in this character buffer. % */ MagickExport size_t InterpretImageFilename(const ImageInfo *image_info, Image *image,const char *format,int value,char *filename) { char *q; int c; MagickBooleanType canonical; register const char *p; size_t length; canonical=MagickFalse; length=0; (void) CopyMagickString(filename,format,MaxTextExtent); for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%')) { q=(char *) p+1; if (*q == '%') { p=q+1; continue; } if (*q == '0') { ssize_t value; value=(ssize_t) strtol(q,&q,10); (void) value; } switch (*q) { case 'd': case 'o': case 'x': { q++; c=(*q); *q='\0'; (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent- (p-format)),p,value); *q=c; (void) ConcatenateMagickString(filename,q,MaxTextExtent); canonical=MagickTrue; if (*(q-1) != '%') break; p++; break; } case '[': { char pattern[MaxTextExtent]; const char *value; register char *r; register ssize_t i; ssize_t depth; /* Image option. 
*/
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
          if (image != (Image *) NULL)
            value=GetImageProperty(image,pattern);
          else
            if (image_info != (ImageInfo *) NULL)
              value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
%  0..65535).
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
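%
%  An illustrative caller-side check:
%
%    if (IsHighDynamicRangeImage(image,exception) != MagickFalse)
%      (void) FormatLocaleFile(stdout,
%        "samples fall outside the quantum range\n");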
% */ MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image, ExceptionInfo *exception) { #if !defined(MAGICKCORE_HDRI_SUPPORT) (void) image; (void) exception; return(MagickFalse); #else CacheView *image_view; MagickBooleanType status; MagickPixelPacket zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((pixel.red < 0.0) || (pixel.red > QuantumRange) || (pixel.red != (QuantumAny) pixel.red)) break; if ((pixel.green < 0.0) || (pixel.green > QuantumRange) || (pixel.green != (QuantumAny) pixel.green)) break; if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) || (pixel.blue != (QuantumAny) pixel.blue)) break; if (pixel.matte != MagickFalse) { if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) || (pixel.opacity != (QuantumAny) pixel.opacity)) break; } if (pixel.colorspace == CMYKColorspace) { if ((pixel.index < 0.0) || (pixel.index > QuantumRange) || (pixel.index != (QuantumAny) pixel.index)) break; } p++; } if (x < (ssize_t) image->columns) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status != MagickFalse ? MagickFalse : MagickTrue); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O b j e c t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageObject() returns MagickTrue if the image sequence contains a valid % set of image objects. % % The format of the IsImageObject method is: % % MagickBooleanType IsImageObject(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageObject(const Image *image) { register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) if (p->signature != MagickSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTaintImage() returns MagickTrue any pixel in the image has been altered % since it was first constituted. % % The format of the IsTaintImage method is: % % MagickBooleanType IsTaintImage(const Image *image) % % A description of each parameter follows: % % o image: the image. 
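%
%  For example (an illustrative sketch), skip re-writing an image that has
%  not been modified since it was read:
%
%    if (IsTaintImage(image) == MagickFalse)
%      return(MagickTrue);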
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the address of a pointer to the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* keep the original reference if the clone fails */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,
%        const size_t width,const size_t height,
%        const MagickPixelPacket *background)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
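%
%  An illustrative sketch (image_info and exception are assumed to be valid;
%  the color name is arbitrary):
%
%    MagickPixelPacket
%      background;
%
%    GetMagickPixelPacket((Image *) NULL,&background);
%    (void) QueryMagickColor("firebrick",&background,exception);
%    canvas=NewMagickImage(image_info,640,480,&background);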
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height, const MagickPixelPacket *background) { CacheView *image_view; ExceptionInfo *exception; Image *image; ssize_t y; MagickBooleanType status; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickSignature); assert(background != (const MagickPixelPacket *) NULL); image=AcquireImage(image_info); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->matte=background->matte; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,background,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. % % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
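%
%  For example (illustrative), the equivalent of the command-line `+repage'
%  option clears any canvas geometry:
%
%    (void) ResetImagePage(image,"0x0+0+0");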
% */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImageChannel() separates a channel from the image and returns it as % a grayscale image. A channel is a particular color component of each pixel % in the image. % % The format of the SeparateImageChannel method is: % % MagickBooleanType SeparateImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % */ MagickExport MagickBooleanType SeparateImageChannel(Image *image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); /* Separate image channels. 
*/ status=MagickTrue; if (channel == GrayChannels) image->matte=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelGreen(q)); SetPixelBlue(q,GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelBlue(q)); SetPixelGreen(q,GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIndex(indexes+x)); SetPixelGreen(q,GetPixelIndex(indexes+x)); SetPixelBlue(q,GetPixelIndex(indexes+x)); q++; } break; } case TrueAlphaChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelAlpha(q)); SetPixelGreen(q,GetPixelAlpha(q)); SetPixelBlue(q,GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q,PixelIntensityToQuantum(q)); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SeparateImageChannel) #endif proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (channel != GrayChannels) image->matte=MagickFalse; (void) SetImageColorspace(image,RGBColorspace); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % MagickBooleanType SeparateImages(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channels to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % % o exception: return any errors or warnings in this structure. 
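%
%  An illustrative call that returns a three-image grayscale list, one image
%  per RGB channel, in channel order:
%
%    channel_images=SeparateImages(image,
%      RedChannel | GreenChannel | BlueChannel,exception);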
% */ MagickExport Image *SeparateImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); if ((channel & RedChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,RedChannel); AppendImageToList(&images,separate_image); } if ((channel & GreenChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,GreenChannel); AppendImageToList(&images,separate_image); } if ((channel & BlueChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlueChannel); AppendImageToList(&images,separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlackChannel); AppendImageToList(&images,separate_image); } if ((channel & OpacityChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,OpacityChannel); AppendImageToList(&images,separate_image); } return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelType alpha_type) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % CopyAlphaChannel, DeactivateAlphaChannel, ExtractAlphaChannel, % OpaqueAlphaChannel, ResetAlphaChannel, SetAlphaChannel, % ShapeAlphaChannel, and TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelType alpha_type) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); status=MagickFalse; switch (alpha_type) { case ActivateAlphaChannel: { image->matte=MagickTrue; break; } case BackgroundAlphaChannel: { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* Set transparent pixels to background color. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q,pixel.red); SetPixelGreen(q,pixel.green); SetPixelBlue(q,pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case DeactivateAlphaChannel: { image->matte=MagickFalse; break; } case ShapeAlphaChannel: case CopyAlphaChannel: { /* Special usage case for SeparateImageChannel(): copy grayscale color to the alpha channel. */ status=SeparateImageChannel(image,GrayChannels); image->matte=MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* Reset all color channels to background color. */ GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *) NULL,&background); (void) LevelColorsImage(image,&background,&background,MagickTrue); } break; } case ExtractAlphaChannel: { status=SeparateImageChannel(image,TrueAlphaChannel); image->matte=MagickFalse; break; } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status=SetImageOpacity(image,OpaqueOpacity); image->matte=MagickTrue; break; } case TransparentAlphaChannel: { status=SetImageOpacity(image,TransparentOpacity); image->matte=MagickTrue; break; } case SetAlphaChannel: { if (image->matte == MagickFalse) { status=SetImageOpacity(image,OpaqueOpacity); image->matte=MagickTrue; } break; } case UndefinedAlphaChannel: break; } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image) % % A description of each parameter follows: % % o image: the image. 
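%
%  For example (an illustrative sketch), set the color member first and then
%  flood the canvas:
%
%    (void) QueryColorDatabase("wheat",&image->background_color,
%      &image->exception);
%    (void) SetImageBackgroundColor(image);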
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->background_color.opacity != OpaqueOpacity)
    image->matte=MagickTrue;
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannels() sets the number of pixel channels associated with the
%  image.
%
%  The format of the SetImageChannels method is:
%
%      MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  image->channels=channels;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() sets the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,
%        const MagickPixelPacket *color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color: the image color.
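%
%  An illustrative sketch that clears the canvas to fully transparent:
%
%    MagickPixelPacket
%      color;
%
%    GetMagickPixelPacket(image,&color);
%    (void) QueryMagickColor("none",&color,&image->exception);
%    (void) SetImageColor(image,&color);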
% */ MagickExport MagickBooleanType SetImageColor(Image *image, const MagickPixelPacket *color) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); assert(color != (const MagickPixelPacket *) NULL); image->colorspace=color->colorspace; image->matte=color->matte; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,color,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class) { image->storage_class=storage_class; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C l i p M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageClipMask() associates a clip path with the image. The clip path % must be the same dimensions as the image. Set any pixel component of % the clip path to TransparentOpacity to prevent that corresponding image % pixel component from being updated when SyncAuthenticPixels() is applied. % % The format of the SetImageClipMask method is: % % MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask) % % A description of each parameter follows: % % o image: the image. % % o clip_mask: the image clip path. 
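%
%  For example (an illustrative sketch; mask_image matches the image
%  dimensions), protect pixels outside the mask and later release the clip
%  path:
%
%    (void) SetImageClipMask(image,mask_image);
%    ...
%    (void) SetImageClipMask(image,(Image *) NULL);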
% */ MagickExport MagickBooleanType SetImageClipMask(Image *image, const Image *clip_mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (clip_mask != (const Image *) NULL) if ((clip_mask->columns != image->columns) || (clip_mask->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); image->clip_mask=NewImageList(); if (clip_mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception); if (image->clip_mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageExtent() sets the image size (i.e. columns & rows). % % The format of the SetImageExtent method is: % % MagickBooleanType SetImageExtent(Image *image, % const size_t columns,const size_t rows) % % A description of each parameter follows: % % o image: the image. % % o columns: The image width in pixels. % % o rows: The image height in pixels. % */ MagickExport MagickBooleanType SetImageExtent(Image *image, const size_t columns,const size_t rows) { if ((columns == 0) || (rows == 0)) return(MagickFalse); image->columns=columns; image->rows=rows; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfo() initializes the `magick' field of the ImageInfo structure. % It is set to a type of image format based on the prefix or suffix of the % filename. For example, `ps:image' returns PS indicating a Postscript image. % JPEG is returned for this filename: `image.jpg'. The filename prefix has % precendence over the suffix. Use an optional index enclosed in brackets % after a file name to specify a desired scene of a multi-resolution image % format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value % indicates success. % % The format of the SetImageInfo method is: % % MagickBooleanType SetImageInfo(ImageInfo *image_info, % const unsigned int frames,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o frames: the number of images you intend to write. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info, const unsigned int frames,ExceptionInfo *exception) { char extension[MaxTextExtent], filename[MaxTextExtent], magic[MaxTextExtent], *q, subimage[MaxTextExtent]; const MagicInfo *magic_info; const MagickInfo *magick_info; ExceptionInfo *sans_exception; Image *image; MagickBooleanType status; register const char *p; ssize_t count; unsigned char magick[2*MaxTextExtent]; /* Look for 'image.format' in filename. 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *subimage='\0'; if (frames == 0) { GetPathComponent(image_info->filename,SubimagePath,subimage); if (*subimage != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]). */ if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse) { if (IsGeometry(subimage) != MagickFalse) (void) CloneString(&image_info->extract,subimage); } else { size_t first, last; (void) CloneString(&image_info->scenes,subimage); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; image_info->subimage=image_info->scene; image_info->subrange=image_info->number_scenes; } } } *extension='\0'; GetPathComponent(image_info->filename,ExtensionPath,extension); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (*extension != '\0') if ((LocaleCompare(extension,"gz") == 0) || (LocaleCompare(extension,"Z") == 0) || (LocaleCompare(extension,"wmz") == 0)) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif #if defined(MAGICKCORE_BZLIB_DELEGATE) if (*extension != '\0') if (LocaleCompare(extension,"bz2") == 0) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if (*extension != '\0') { MagickFormatType format_type; register ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "EPHEMERAL", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,extension,MaxTextExtent); LocaleUpper(magic); /* Look for explicit image formats. */ format_type=UndefinedFormatType; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } magick_info=GetMagickInfo(magic,sans_exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ } /* Look for explicit 'format:image' in filename. 
*/ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') (void) CopyMagickString(magic,image_info->magick,MaxTextExtent); else { /* User specified image format. */ LocaleUpper(magic); if (IsMagickConflict(magic) == MagickFalse) { (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); if (LocaleCompare(magic,"EPHEMERAL") != 0) image_info->affirm=MagickTrue; else image_info->temporary=MagickTrue; } } magick_info=GetMagickInfo(magic,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,filename); if ((LocaleCompare(filename,image_info->filename) != 0) && (strchr(filename,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { /* Determine the image format from the first few bytes of the file. */ image=AcquireImage(image_info); (void) CopyMagickString(image->filename,image_info->filename, MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy standard input or pipe to temporary file. */ *filename='\0'; status=ImageToFile(image,filename,exception); (void) CloseBlob(image); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,filename,MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); image_info->temporary=MagickTrue; } (void) ResetMagickMemory(magick,0,sizeof(magick)); count=ReadBlob(image,2*MaxTextExtent,magick); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic.xml configuration file. 
*/ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { (void) CopyMagickString(image_info->magick,GetMagicName(magic_info), MaxTextExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask) % % A description of each parameter follows: % % o image: the image. % % o mask: the image mask. 
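%
%  An illustrative pairing (mask_image is assumed to match the image
%  dimensions; passing NULL removes the mask):
%
%    (void) SetImageMask(image,mask_image);
%    ...
%    (void) SetImageMask(image,(Image *) NULL);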
% */ MagickExport MagickBooleanType SetImageMask(Image *image, const Image *mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (mask != (const Image *) NULL) if ((mask->columns != image->columns) || (mask->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); image->mask=NewImageList(); if (mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception); if (image->mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e O p a c i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageOpacity() sets the opacity levels of the image. % % The format of the SetImageOpacity method is: % % MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o opacity: the level of transparency: 0 is fully opaque and QuantumRange is % fully transparent. % */ MagickExport MagickBooleanType SetImageOpacity(Image *image, const Quantum opacity) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); image->matte=opacity != OpaqueOpacity ? MagickTrue : MagickFalse; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelOpacity(q,opacity); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType, % PaletteMatteType, TrueColorType, TrueColorMatteType, % ColorSeparationType, ColorSeparationMatteType, OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. 
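%
%  For example (illustrative), force a true-color representation before
%  direct pixel processing:
%
%    if (SetImageType(image,TrueColorType) == MagickFalse)
%      return(MagickFalse);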
% */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { if (IsGrayImage(image,&image->exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace); if (IsMonochromeImage(image,&image->exception) == MagickFalse) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); } image->matte=MagickFalse; break; } case GrayscaleType: { if (IsGrayImage(image,&image->exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace); image->matte=MagickFalse; break; } case GrayscaleMatteType: { if (IsGrayImage(image,&image->exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case PaletteType: { if (IsRGBColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,RGBColorspace); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); } image->matte=MagickFalse; break; } case PaletteBilevelMatteType: { if (IsRGBColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,RGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteMatteType: { if (IsRGBColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,RGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { if (IsRGBColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,RGBColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); image->matte=MagickFalse; break; } case TrueColorMatteType: { if (IsRGBColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,RGBColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case ColorSeparationType: { if (image->colorspace != CMYKColorspace) { if (IsRGBColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,RGBColorspace); status=TransformImageColorspace(image,CMYKColorspace); } if 
(image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); image->matte=MagickFalse; break; } case ColorSeparationMatteType: { if (image->colorspace != CMYKColorspace) { if (IsRGBColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,RGBColorspace); status=TransformImageColorspace(image,CMYKColorspace); } if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case OptimizeType: case UndefinedType: break; } image->type=type; image_info=DestroyImageInfo(image_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, % const VirtualPixelMethod virtual_pixel_method) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % */ MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, const VirtualPixelMethod virtual_pixel_method) { assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(SetPixelCacheVirtualMethod(image,virtual_pixel_method)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S m u s h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SmushImages() takes all images from the current image pointer to the end % of the image list and smushes them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now affects how the image is justified in the % final image. % % The format of the SmushImages method is: % % Image *SmushImages(const Image *images,const MagickBooleanType stack, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o offset: minimum distance in pixels between images. % % o exception: return any errors or warnings in this structure.
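%
%  A minimal usage sketch (assuming "images" is a previously read image list
%  and "exception" an acquired ExceptionInfo; this stacks the list
%  top-to-bottom with at least a 10 pixel gap between neighbors):
%
%      Image *smushed=SmushImages(images,MagickTrue,10,exception);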
% */ static ssize_t SmushXGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *left_view, *right_view; const Image *left_image, *right_image; RectangleInfo left_geometry, right_geometry; register const PixelPacket *p; register ssize_t i, y; size_t gap; ssize_t x; if (images->previous == (Image *) NULL) return(0); right_image=images; SetGeometry(smush_image,&right_geometry); GravityAdjustGeometry(right_image->columns,right_image->rows, right_image->gravity,&right_geometry); left_image=images->previous; SetGeometry(smush_image,&left_geometry); GravityAdjustGeometry(left_image->columns,left_image->rows, left_image->gravity,&left_geometry); gap=right_image->columns; left_view=AcquireCacheView(left_image); right_view=AcquireCacheView(right_image); for (y=0; y < (ssize_t) smush_image->rows; y++) { for (x=(ssize_t) left_image->columns-1; x > 0; x--) { p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((left_image->columns-x-1) >= gap)) break; } i=(ssize_t) left_image->columns-x-1; for (x=0; x < (ssize_t) right_image->columns; x++) { p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((x+i) >= (ssize_t) gap)) break; } if ((x+i) < (ssize_t) gap) gap=(size_t) (x+i); } right_view=DestroyCacheView(right_view); left_view=DestroyCacheView(left_view); if (y < (ssize_t) smush_image->rows) return(offset); return((ssize_t) gap-offset); } static ssize_t SmushYGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *bottom_view, *top_view; const Image *bottom_image, *top_image; RectangleInfo bottom_geometry, top_geometry; register const PixelPacket *p; register ssize_t i, x; size_t gap; ssize_t y; if (images->previous == (Image *) NULL) return(0); bottom_image=images; SetGeometry(smush_image,&bottom_geometry); GravityAdjustGeometry(bottom_image->columns,bottom_image->rows, bottom_image->gravity,&bottom_geometry); top_image=images->previous; SetGeometry(smush_image,&top_geometry); GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity, &top_geometry); gap=bottom_image->rows; top_view=AcquireCacheView(top_image); bottom_view=AcquireCacheView(bottom_image); for (x=0; x < (ssize_t) smush_image->columns; x++) { for (y=(ssize_t) top_image->rows-1; y > 0; y--) { p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((top_image->rows-y-1) >= gap)) break; } i=(ssize_t) top_image->rows-y-1; for (y=0; y < (ssize_t) bottom_image->rows; y++) { p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((y+i) >= (ssize_t) gap)) break; } if ((y+i) < (ssize_t) gap) gap=(size_t) (y+i); } bottom_view=DestroyCacheView(bottom_view); top_view=DestroyCacheView(top_view); if (x < (ssize_t) smush_image->columns) return(offset); return((ssize_t) gap-offset); } MagickExport Image *SmushImages(const Image *images, const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception) { #define SmushImageTag "Smush/Image" CacheView *smush_view; const Image *image; Image *smush_image; MagickBooleanType matte, proceed, status; MagickOffsetType n; RectangleInfo geometry; 
register const Image *next; size_t height, number_images, width; ssize_t x_offset, y_offset; /* Compute maximum area of smushed area. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=images; matte=image->matte; number_images=1; width=image->columns; height=image->rows; next=GetNextImageInList(image); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->matte != MagickFalse) matte=MagickTrue; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; if (next->previous != (Image *) NULL) height+=offset; continue; } width+=next->columns; if (next->previous != (Image *) NULL) width+=offset; if (next->rows > height) height=next->rows; } /* Smush images. */ smush_image=CloneImage(image,width,height,MagickTrue,exception); if (smush_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse) { InheritException(exception,&smush_image->exception); smush_image=DestroyImage(smush_image); return((Image *) NULL); } smush_image->matte=matte; (void) SetImageBackgroundColor(smush_image); status=MagickTrue; x_offset=0; y_offset=0; smush_view=AcquireCacheView(smush_image); for (n=0; n < (MagickOffsetType) number_images; n++) { SetGeometry(smush_image,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry); if (stack != MagickFalse) { x_offset-=geometry.x; y_offset-=SmushYGap(smush_image,image,offset,exception); } else { x_offset-=SmushXGap(smush_image,image,offset,exception); y_offset-=geometry.y; } status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset); proceed=SetImageProgress(image,SmushImageTag,n,number_images); if (proceed == MagickFalse) break; if (stack == MagickFalse) { x_offset+=(ssize_t) image->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) image->rows; } image=GetNextImageInList(image); } if (stack == MagickFalse) smush_image->columns=(size_t) x_offset; else smush_image->rows=(size_t) y_offset; smush_view=DestroyCacheView(smush_view); if (status == MagickFalse) smush_image=DestroyImage(smush_image); return(smush_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StripImage() strips an image of all profiles and comments. % % The format of the StripImage method is: % % MagickBooleanType StripImage(Image *image) % % A description of each parameter follows: % % o image: the image. 
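%
%  A minimal usage sketch (assuming "image" is a previously acquired Image
%  handle; profiles, comments, and creation/modification dates are removed):
%
%      (void) StripImage(image);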
% */ MagickExport MagickBooleanType StripImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); DestroyImageProfiles(image); (void) DeleteImageProperty(image,"comment"); (void) DeleteImageProperty(image,"date:create"); (void) DeleteImageProperty(image,"date:modify"); (void) SetImageArtifact(image,"png:include-chunk","none,trns,gama"); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImage() initializes the red, green, and blue intensities of each pixel % as defined by the colormap index. % % The format of the SyncImage method is: % % MagickBooleanType SyncImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static inline IndexPacket PushColormapIndex(Image *image, const size_t index,MagickBooleanType *range_exception) { if (index < image->colors) return((IndexPacket) index); *range_exception=MagickTrue; return((IndexPacket) 0); } MagickExport MagickBooleanType SyncImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType range_exception, status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (image->storage_class == DirectClass) return(MagickFalse); range_exception=MagickFalse; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket index; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x), &range_exception); /* SetPixelRgb()/SetPixelRGBO() appear to expand to brace blocks, hence no semicolon before the else */ if (image->matte == MagickFalse) SetPixelRgb(q,image->colormap+(ssize_t) index) else SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (range_exception != MagickFalse) (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageError,"InvalidColormapIndex","`%s'",image->filename); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e S e t t i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageSettings() syncs the image info options to the image. % % The format of the SyncImageSettings method is: % % MagickBooleanType SyncImageSettings(const ImageInfo *image_info, % Image *image) % MagickBooleanType SyncImagesSettings(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image.
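%
%  A minimal usage sketch (assuming "image_info" carries previously parsed
%  options and "image" is a freshly read frame):
%
%      (void) SyncImageSettings(image_info,image);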
% */ MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info, Image *images) { Image *image; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) (void) SyncImageSettings(image_info,image); (void) DeleteImageOption(image_info,"page"); return(MagickTrue); } MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info, Image *image) { char property[MaxTextExtent]; const char *option, *value; GeometryInfo geometry_info; MagickStatusType flags; ResolutionType units; /* Sync image options. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->background_color, &image->exception); option=GetImageOption(image_info,"bias"); if (option != (const char *) NULL) image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->border_color,&image->exception); option=GetImageOption(image_info,"colors"); if (option != (const char *) NULL) image->colors=StringToUnsignedLong(option); option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { GeometryInfo geometry_info; /* Set image density. 
*/ flags=ParseGeometry(option,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; } option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const char *) NULL) image->interpolate=(InterpolatePixelMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->matte_color,&image->exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); 
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->transparent_color, &image->exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); if (option != (const char *) NULL) units=(ResolutionType) ParseCommandOption(MagickResolutionOptions, MagickFalse,option); else units = image_info->units; if (units != UndefinedResolution) { if (image->units != units) switch (image->units) { case PixelsPerInchResolution: { if (units == PixelsPerCentimeterResolution) { image->x_resolution/=2.54; image->y_resolution/=2.54; } break; } case PixelsPerCentimeterResolution: { if (units == PixelsPerInchResolution) { image->x_resolution=(double) ((size_t) (100.0*2.54* image->x_resolution+0.5))/100.0; image->y_resolution=(double) ((size_t) (100.0*2.54* image->y_resolution+0.5))/100.0; } break; } default: break; } image->units=units; } option=GetImageOption(image_info,"white-point"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y=image->chromaticity.white_point.x; } ResetImageOptionIterator(image_info); for (option=GetNextImageOption(image_info); option != (const char *) NULL; ) { value=GetImageOption(image_info,option); if (value != (const char *) NULL) { (void) FormatLocaleString(property,MaxTextExtent,"%s",option); (void) SetImageArtifact(image,property,value); } option=GetNextImageOption(image_info); } return(MagickTrue); }
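/*
  A minimal end-to-end sketch of the settings-sync path above (hypothetical
  file name, error handling elided): read an image list, then push the
  ImageInfo options onto every frame before further processing.

      ExceptionInfo *exception=AcquireExceptionInfo();
      ImageInfo *image_info=CloneImageInfo((ImageInfo *) NULL);
      (void) CopyMagickString(image_info->filename,"input.gif",MaxTextExtent);
      Image *images=ReadImage(image_info,exception);
      if (images != (Image *) NULL)
        (void) SyncImagesSettings(image_info,images);
*/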
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // author:BUG1989 (https://github.com/BUG1989/) Long-term support. // author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7. // // Copyright (C) 2019 BUG1989. All rights reserved. // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch) { Mat kernel_tm(4 * 4, inch, outch, 2ul); // G const short ktm[4][3] = { {2, 0, 0}, {1, 1, 1}, {1, -1, 1}, {0, 0, 2} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[4][3]; for (int i = 0; i < 4; i++) { tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 4; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 4; i++) { kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } for (int r = 0; r < 4; r++) { Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, 2u); int p = 0; for (; p + 7 < outch; p += 8) { const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16; const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16; const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16; const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16; const short* kernel4 = (const short*)kernel_tm + (p + 4) * inch * 16; const short* kernel5 = (const short*)kernel_tm + (p + 5) * inch * 16; const short* kernel6 = (const short*)kernel_tm + (p + 6) * inch * 16; const short* kernel7 = (const short*)kernel_tm + (p + 7) * inch * 16; short* ktmp = kernel_tm_test.channel(p / 8); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; 
ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; kernel4 += 16; kernel5 += 16; kernel6 += 16; kernel7 += 16; } } for (; p + 3 < outch; p += 4) { const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16; const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16; const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16; const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16; short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; } } for (; p < outch; p++) { const short* kernel0 = (const short*)kernel_tm + p * inch * 16; short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 16; } } kernel_tm2.push_back(kernel_tm_test); } } static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 2n+2, winograd F(2,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 1) / 2 * 2; outh = (outh + 1) / 2 * 2; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN int nRowBlocks = w_tm / 4; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles * 4, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 0.00f, 1.0f} // }; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 2; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q); #if __ARM_NEON #if __aarch64__ 
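// Input transform for Winograd F(2,3): compute B_t * d * B on a 4x4 tile
// (B_t rows: {1,0,-1,0}, {0,1,1,0}, {0,-1,1,0}, {0,-1,0,1}, matching the
// commented itm[][] above). The NEON code below widens int8 to int16 while
// applying the row transform, transposes the 4x4 block, then applies the
// same transform to the columns before storing the four output rows.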
asm volatile( // load "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v1.8b}, [%1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.8b}, [%2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v3.8b}, [%3] \n" // w = B_t * d, trans int8 to int16 "ssubl v4.8h, v0.8b, v2.8b \n" // d4 "saddl v5.8h, v1.8b, v2.8b \n" // d6 "ssubl v6.8h, v2.8b, v1.8b \n" // d8 "ssubl v7.8h, v3.8b, v1.8b \n" // d10 // transpose w to w_t "trn1 v8.4h, v4.4h, v5.4h \n" "trn2 v9.4h, v4.4h, v5.4h \n" "trn1 v10.4h, v6.4h, v7.4h \n" "trn2 v11.4h, v6.4h, v7.4h \n" "trn1 v0.2s, v8.2s, v10.2s \n" "trn2 v2.2s, v8.2s, v10.2s \n" "trn1 v1.2s, v9.2s, v11.2s \n" "trn2 v3.2s, v9.2s, v11.2s \n" // U = B_t * d_t "sub v4.4h, v0.4h, v2.4h \n" "add v5.4h, v1.4h, v2.4h \n" "sub v6.4h, v2.4h, v1.4h \n" "sub v7.4h, v3.4h, v1.4h \n" // save "st1 {v4.4h}, [%4] \n" "st1 {v5.4h}, [%5] \n" "st1 {v6.4h}, [%6] \n" "st1 {v7.4h}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else asm volatile( // load "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "pld [%1, #64] \n" "vld1.s8 {d1}, [%1] \n" "pld [%2, #64] \n" "vld1.s8 {d2}, [%2] \n" "pld [%3, #64] \n" "vld1.s8 {d3}, [%3] \n" // w = B_t * d, trans int8 to int16 "vsubl.s8 q2, d0, d2 \n" // d4 "vaddl.s8 q3, d1, d2 \n" // d6 "vsubl.s8 q4, d2, d1 \n" // d8 "vsubl.s8 q5, d3, d1 \n" // d10 // transpose w to w_t "vtrn.s16 d4, d6 \n" "vtrn.s16 d8, d10 \n" "vtrn.s32 d4, d8 \n" "vtrn.s32 d6, d10 \n" // U = B_t * d_t "vsub.s16 d11, d4, d8 \n" "vadd.s16 d12, d6, d8 \n" "vsub.s16 d13, d8, d6 \n" "vsub.s16 d14, d10, d6 \n" // save "vst1.s32 {d11}, [%4] \n" "vst1.s32 {d12}, [%5] \n" "vst1.s32 {d13}, [%6] \n" "vst1.s32 {d14}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"); #endif // __aarch64__ #else short d0[4], d1[4], d2[4], d3[4]; short w0[4], w1[4], w2[4], w3[4]; short t0[4], t1[4], t2[4], t3[4]; // load for (int n = 0; n < 4; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; } // w = B_t * d for (int n = 0; n < 4; n++) { w0[n] = d0[n] - d2[n]; w1[n] = d1[n] + d2[n]; w2[n] = d2[n] - d1[n]; w3[n] = d3[n] - d1[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; } // U = B_t * d_t for (int n = 0; n < 4; n++) { d0[n] = t0[n] - t2[n]; d1[n] = t1[n] + t2[n]; d2[n] = t2[n] - t1[n]; d3[n] = t3[n] - t1[n]; } // save to out_tm for (int n = 0; n < 4; n++) { out_tm0[n] = d0[n]; out_tm1[n] = d1[n]; out_tm2[n] = d2[n]; out_tm3[n] = d3[n]; } #endif r0 += 2; r1 += 2; r2 += 2; r3 += 2; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN int nRowBlocks = w_tm / 4; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); #pragma omp 
parallel for num_threads(opt.num_threads) for (int r = 0; r < 4; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); int* output4_tm = top_blob_tm.channel(p + 4); int* output5_tm = top_blob_tm.channel(p + 5); int* output6_tm = top_blob_tm.channel(p + 6); int* output7_tm = top_blob_tm.channel(p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n + 4]; sum2[n] += (int)r0[n] * kptr[n + 8]; sum3[n] += (int)r0[n] * kptr[n + 12]; sum4[n] += (int)r0[n] * kptr[n + 16]; sum5[n] += (int)r0[n] * kptr[n + 20]; sum6[n] += (int)r0[n] * kptr[n + 24]; sum7[n] += (int)r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; output4_tm += 16; output5_tm += 16; output6_tm += 16; output7_tm += 16; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, 
v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10"); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n + 4]; sum2[n] += (int)r0[n] * kptr[n + 8]; sum3[n] += (int)r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) //"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%1] \n" "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), 
"r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9"); #endif // __aarch64__ #else int sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN int nRowBlocks = w_tm / 4; #if __ARM_NEON int32x2_t _shift = vdup_n_s32(-2); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2; "sub v1.4s, v1.4s, v2.4s \n" "add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3; "add v1.4s, v1.4s, v3.4s \n" "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "dup v6.2d, v4.d[1] \n" "dup v7.2d, v5.d[1] \n" "add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2; "sub v1.2s, v5.2s, v6.2s \n" "add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3; "add v1.2s, v1.2s, v7.2s \n" "sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2 "sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2 "st1 {v0.2s}, [%1], #8 \n" "st1 {v1.2s}, [%2], #8 \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2; "vsubq.s32 q1, q1, q2 \n" "vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3; "vaddq.s32 q1, q1, q3 \n" "vtrn.s32 q0, q1 \n" "vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2; "vsub.s32 d9, d2, d1 \n" "vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3; "vadd.s32 d9, d9, d3 \n" "vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2 "vshl.s32 d9, d9, %P6 \n" // o1 = o1 >> 2 "vst1.s32 {d8}, [%1]! \n" "vst1.s32 {d9}, [%2]! 
\n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q4"); #endif // __aarch64__ #else int s0[4], s1[4], s2[4], s3[4]; int w0[4], w1[4]; int d0[2], d1[2], d2[2], d3[2]; int o0[2], o1[2]; // load for (int n = 0; n < 4; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 4]; s2[n] = out_tile[n + 8]; s3[n] = out_tile[n + 12]; } // w = A_T * W for (int n = 0; n < 4; n++) { w0[n] = s0[n] + s1[n] + s2[n]; w1[n] = s1[n] - s2[n] + s3[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d1[0] = w0[1]; d1[1] = w1[1]; d2[0] = w0[2]; d2[1] = w1[2]; d3[0] = w0[3]; d3[1] = w1[3]; } // Y = A_T * w_t for (int n = 0; n < 2; n++) { o0[n] = d0[n] + d1[n] + d2[n]; o1[n] = d1[n] - d2[n] + d3[n]; } // save to top blob tm,why right 2,because the G' = G*2 outRow0[0] = o0[0] >> 2; outRow0[1] = o0[1] >> 2; outRow1[0] = o1[0] >> 2; outRow1[1] = o1[1] >> 2; out_tile += 16; outRow0 += 2; outRow1 += 2; #endif // __ARM_NEON } outRow0 += outw; outRow1 += outw; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch) { Mat kernel_tm(6 * 6, inch, outch, 2ul); // G // const float ktm[6][3] = { // { 1.0f/4, 0.0f, 0.0f}, // { -1.0f/6, -1.0f/6, -1.0f/6}, // { -1.0f/6, 1.0f/6, -1.0f/6}, // { 1.0f/24, 1.0f/12, 1.0f/6}, // { 1.0f/24, -1.0f/12, 1.0f/6}, // { 0.0f, 0.0f, 1.0f} // }; const short ktm[6][3] = { {6, 0, 0}, {-4, -4, -4}, {-4, 4, -4}, {1, 2, 4}, {1, -2, 4}, {0, 0, 6} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } for (int r = 0; r < 9; r++) { Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, 2u); int p = 0; for (; p + 7 < outch; p += 8) { const short* kernel0 = (const short*)kernel_tm.channel(p); const short* kernel1 = (const short*)kernel_tm.channel(p + 1); const short* kernel2 = (const short*)kernel_tm.channel(p + 2); const short* kernel3 = (const short*)kernel_tm.channel(p + 3); const short* kernel4 = (const short*)kernel_tm.channel(p + 4); const short* kernel5 = (const short*)kernel_tm.channel(p + 5); const short* kernel6 = (const short*)kernel_tm.channel(p + 6); const short* kernel7 = (const short*)kernel_tm.channel(p + 7); short* ktmp = kernel_tm_test.channel(p / 8); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = 
kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p + 3 < outch; p += 4) { const short* kernel0 = (const short*)kernel_tm.channel(p); const short* kernel1 = (const short*)kernel_tm.channel(p + 1); const short* kernel2 = (const short*)kernel_tm.channel(p + 2); const short* kernel3 = (const short*)kernel_tm.channel(p + 3); short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const short* kernel0 = (const short*)kernel_tm.channel(p); short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm2.push_back(kernel_tm_test); } } static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 
- 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, 
_1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0] = _n0[0]; out_tm0[1] = _n0[1]; out_tm0[2] = _n0[2]; out_tm0[3] = _n0[3]; out_tm1[0] = _n0[4]; out_tm1[1] = _n0[5]; out_tm1[2] = _n1[0]; out_tm1[3] = _n1[1]; out_tm2[0] = _n1[2]; out_tm2[1] = _n1[3]; out_tm2[2] = _n1[4]; out_tm2[3] = _n1[5]; out_tm3[0] = _n2[0]; out_tm3[1] = _n2[1]; out_tm3[2] = _n2[2]; out_tm3[3] = _n2[3]; out_tm4[0] = _n2[4]; out_tm4[1] = _n2[5]; out_tm4[2] = _n3[0]; out_tm4[3] = _n3[1]; out_tm5[0] = _n3[2]; out_tm5[1] = _n3[3]; out_tm5[2] = _n3[4]; out_tm5[3] = _n3[5]; out_tm6[0] = _n4[0]; out_tm6[1] = _n4[1]; out_tm6[2] = _n4[2]; out_tm6[3] = _n4[3]; out_tm7[0] = _n4[4]; out_tm7[1] = _n4[5]; out_tm7[2] = _n5[0]; out_tm7[3] = _n5[1]; out_tm8[0] = _n5[2]; out_tm8[1] = _n5[3]; out_tm8[2] = _n5[4]; out_tm8[3] = _n5[5]; #else short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw 
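// Layout note: bottom_blob_tm was created as (w=4, h=inch, c=tiles*9), so the
// 36 transform coefficients of every tile sit in nine groups of four. The dot
// stage below is therefore nine independent multiply-accumulate passes, one
// per group r. A minimal scalar sketch of one group for one output channel
// (in_group / k_group / out_group are illustrative names, same layout assumed):
//
// for (int n = 0; n < 4; n++)
// {
//     int sum = 0;
//     for (int q = 0; q < inch; q++)
//         sum += (int)in_group[q][n] * k_group[q][n]; // s16 * s16 -> s32
//     out_group[n] = sum;
// }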
/ 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); int* output4_tm = top_blob_tm.channel(p + 4); int* output5_tm = top_blob_tm.channel(p + 5); int* output6_tm = top_blob_tm.channel(p + 6); int* output7_tm = top_blob_tm.channel(p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 
q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n + 4]; sum2[n] += (int)r0[n] * kptr[n + 8]; sum3[n] += (int)r0[n] * kptr[n + 12]; sum4[n] += (int)r0[n] * kptr[n + 16]; sum5[n] += (int)r0[n] * kptr[n + 20]; sum6[n] += (int)r0[n] * kptr[n + 24]; sum7[n] += (int)r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; 
q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10"); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n + 4]; sum2[n] += (int)r0[n] * kptr[n + 8]; sum3[n] += (int)r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // 
%0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9"); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; int* outRow2 = outRow0 + outw * 2; int* outRow3 = outRow0 + outw * 3; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w3; int32x2_t _w0n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile + 4); _s1 = vld1q_s32(out_tile + 6); _s1n = vld1_s32(out_tile + 10); _s2 = vld1q_s32(out_tile + 12); _s2n = vld1_s32(out_tile + 16); _s3 = vld1q_s32(out_tile + 18); _s3n = vld1_s32(out_tile + 22); _s4 = vld1q_s32(out_tile + 24); _s4n = vld1_s32(out_tile + 28); _s5 = vld1q_s32(out_tile + 30); _s5n = vld1_s32(out_tile + 34); // w = A_T * W int32x2_t _tp0 = {1, 4}; int32x2_t _tp1 = {2, 8}; // 4*s5[n] int32x4_t _s5x4 = vshlq_n_s32(_s5, 2); int32x2_t _s5x4n = vshl_n_s32(_s5n, 2); int32x4_t _t1p2 = vaddq_s32(_s1, _s2); int32x2_t _t1p2n = vadd_s32(_s1n, _s2n); int32x4_t _t3p4 = vaddq_s32(_s3, _s4); int32x2_t _t3p4n = vadd_s32(_s3n, _s4n); int32x4_t _t1s2 = vsubq_s32(_s1, _s2); int32x2_t _t1s2n = vsub_s32(_s1n, 
_s2n); int32x4_t _t3s4 = vsubq_s32(_s3, _s4); int32x2_t _t3s4n = vsub_s32(_s3n, _s4n); _w0 = vaddq_s32(_s0, _t1p2); _w0n = vadd_s32(_s0n, _t1p2n); _w0 = vaddq_s32(_w0, _t3p4); _w0n = vadd_s32(_w0n, _t3p4n); _w0n = vmul_s32(_w0n, _tp0); // _w2,_w2n _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1); _t1p2n = vmul_s32(_t1p2n, _tp0); _w3 = vaddq_s32(_s5x4, _t1s2); _w3n = vadd_s32(_s5x4n, _t1s2n); _w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1); _w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1); _w3n = vmul_s32(_w3n, _tp0); // _w1, _w1n _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); _t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0); _t1s2n = vmul_s32(_t1s2n, _tp0); int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n); int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n); // transpose w to w_t #if __aarch64__ int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2); int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2); int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3); int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3); int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); _d0 = vreinterpretq_s32_s64(_dt0); _d1 = vreinterpretq_s32_s64(_dt1); _d2 = vreinterpretq_s32_s64(_dt2); _d3 = vreinterpretq_s32_s64(_dt3); _d4 = vtrn1q_s32(_w02n, _w13n); _d5 = vtrn2q_s32(_w02n, _w13n); #else asm volatile( "vtrn.32 %q[_w0], %q[_w1] \n" "vtrn.32 %q[_w2], %q[_w3] \n" "vswp %f[_w0], %e[_w2] \n" "vswp %f[_w1], %e[_w3] \n" "vtrn.32 %q[_w02n], %q[_w13n] \n" : [_w0] "+w"(_w0), [_w1] "+w"(_t1s2), [_w2] "+w"(_t1p2), [_w3] "+w"(_w3), [_w02n] "+w"(_w02n), [_w13n] "+w"(_w13n) : : "cc", "memory"); _d0 = _w0; _d1 = _t1s2; _d2 = _t1p2; _d3 = _w3; _d4 = _w02n; _d5 = _w13n; #endif // Y = A_T * w_t _t1p2 = vaddq_s32(_d1, _d2); _t3p4 = vaddq_s32(_d3, _d4); _t1s2 = vsubq_s32(_d1, _d2); _t3s4 = vsubq_s32(_d3, _d4); _o0 = vaddq_s32(_d0, _t1p2); _o0 = vaddq_s32(_o0, _t3p4); // _o2 _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _o3 = vaddq_s32(_d5, _t1s2); _o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1); // _o1 _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); // save to top blob tm float32x4_t _ot0 = vcvtq_f32_s32(_o0); float32x4_t _ot1 = vcvtq_f32_s32(_t1s2); float32x4_t _ot2 = vcvtq_f32_s32(_t1p2); float32x4_t _ot3 = vcvtq_f32_s32(_o3); _ot0 = vmulq_n_f32(_ot0, 0.0017361112); _ot1 = vmulq_n_f32(_ot1, 0.0017361112); _ot2 = vmulq_n_f32(_ot2, 0.0017361112); _ot3 = vmulq_n_f32(_ot3, 0.0017361112); _o0 = vcvtq_s32_f32(_ot0); _o1 = vcvtq_s32_f32(_ot1); _o2 = vcvtq_s32_f32(_ot2); _o3 = vcvtq_s32_f32(_ot3); vst1q_s32(outRow0, _o0); vst1q_s32(outRow1, _o1); vst1q_s32(outRow2, _o2); vst1q_s32(outRow3, _o3); #else int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; int w0[6], w1[6], w2[6], w3[6]; int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; int o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 5; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]; } for (int n = 5; n < 6; n++) { w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]); w1[n] = 4 * (s1[n] 
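// Scale note: 0.0017361112 is 1/576, and the scalar fallback divides by 576
// instead. The factor 576 presumably compensates the integer scaling applied
// to the transforms (24 per side of the kernel transform, 24 * 24 = 576);
// only the final correction is visible in this source. Equivalent forms:
//   outRow0[n] = o0[n] / 576;            // scalar
//   _ot0 = vmulq_n_f32(_ot0, 1 / 576.f); // NEON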
- s2[n] + 2 * s3[n] - 2 * s4[n]); w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]); w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]); } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] / 576; outRow1[n] = o1[n] / 576; outRow2[n] = o2[n] / 576; outRow3[n] = o3[n] / 576; } #endif // __ARM_NEON out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw * 3; outRow1 += outw * 3; outRow2 += outw * 3; outRow3 += outw * 3; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, std::vector<float> scales_dequant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * 
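// Note: this dequant variant repeats the same input transform and dot stages
// as conv3x3s1_winograd43_int8_neon above; only the output transform differs,
// writing float with a per-channel dequant scale and bias instead of raw int32.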
nRowBlocks + i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0] = _n0[0]; out_tm0[1] = _n0[1]; out_tm0[2] = _n0[2]; out_tm0[3] = _n0[3]; out_tm1[0] = _n0[4]; out_tm1[1] = _n0[5]; out_tm1[2] = _n1[0]; out_tm1[3] = _n1[1]; out_tm2[0] = _n1[2]; out_tm2[1] = _n1[3]; out_tm2[2] = _n1[4]; out_tm2[3] = _n1[5]; out_tm3[0] = _n2[0]; out_tm3[1] = _n2[1]; out_tm3[2] = _n2[2]; out_tm3[3] = _n2[3]; out_tm4[0] = 
_n2[4]; out_tm4[1] = _n2[5]; out_tm4[2] = _n3[0]; out_tm4[3] = _n3[1]; out_tm5[0] = _n3[2]; out_tm5[1] = _n3[3]; out_tm5[2] = _n3[4]; out_tm5[3] = _n3[5]; out_tm6[0] = _n4[0]; out_tm6[1] = _n4[1]; out_tm6[2] = _n4[2]; out_tm6[3] = _n4[3]; out_tm7[0] = _n4[4]; out_tm7[1] = _n4[5]; out_tm7[2] = _n5[0]; out_tm7[3] = _n5[1]; out_tm8[0] = _n5[2]; out_tm8[1] = _n5[3]; out_tm8[2] = _n5[4]; out_tm8[3] = _n5[5]; #else short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); int* output4_tm = top_blob_tm.channel(p + 4); int* output5_tm = top_blob_tm.channel(p + 5); int* 
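// Index note: the repacked kernel stores 8-channel blocks first, then
// 4-channel blocks, then single channels, so kernel_tm_test[r] is addressed
// as channel(p / 8) in this 8-wide loop, channel(p / 8 + (p % 8) / 4) in the
// 4-wide tail, and channel(p / 8 + (p % 8) / 4 + p % 4) in the scalar tail.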
output6_tm = top_blob_tm.channel(p + 6); int* output7_tm = top_blob_tm.channel(p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n + 4]; sum2[n] += (int)r0[n] * kptr[n + 8]; sum3[n] += (int)r0[n] * kptr[n + 12]; sum4[n] += (int)r0[n] * kptr[n + 16]; sum5[n] += (int)r0[n] * kptr[n + 20]; sum6[n] += (int)r0[n] * kptr[n + 24]; sum7[n] += (int)r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, 
v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10"); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n + 4]; sum2[n] += (int)r0[n] * kptr[n + 8]; sum3[n] += (int)r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4); const short* r0 = bottom_blob_tm.channel(tiles * r + i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", 
"x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9"); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_dequant0 = scales_dequant[p]; const float scale0 = scale_dequant0 / 576.0; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w3; int32x2_t _w0n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile + 4); _s1 = vld1q_s32(out_tile + 6); _s1n = vld1_s32(out_tile + 10); _s2 = vld1q_s32(out_tile + 12); _s2n = vld1_s32(out_tile + 16); _s3 = vld1q_s32(out_tile + 18); _s3n = vld1_s32(out_tile + 22); _s4 = vld1q_s32(out_tile + 24); _s4n = vld1_s32(out_tile + 28); _s5 = vld1q_s32(out_tile + 30); _s5n = vld1_s32(out_tile + 34); // w = A_T * W int32x2_t _tp0 = {1, 4}; int32x2_t _tp1 = {2, 8}; // 4*s5[n] int32x4_t _s5x4 = vshlq_n_s32(_s5, 2); int32x2_t _s5x4n = vshl_n_s32(_s5n, 2); int32x4_t _t1p2 = vaddq_s32(_s1, _s2); int32x2_t _t1p2n = vadd_s32(_s1n, _s2n); int32x4_t _t3p4 = vaddq_s32(_s3, _s4); int32x2_t _t3p4n = vadd_s32(_s3n, _s4n); int32x4_t _t1s2 = vsubq_s32(_s1, _s2); int32x2_t _t1s2n = vsub_s32(_s1n, _s2n); int32x4_t _t3s4 = vsubq_s32(_s3, _s4); int32x2_t _t3s4n = vsub_s32(_s3n, _s4n); _w0 = vaddq_s32(_s0, _t1p2); _w0n = vadd_s32(_s0n, _t1p2n); _w0 = vaddq_s32(_w0, _t3p4); _w0n = vadd_s32(_w0n, _t3p4n); _w0n = vmul_s32(_w0n, _tp0); // _w2,_w2n _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1); _t1p2n = vmul_s32(_t1p2n, _tp0); _w3 = vaddq_s32(_s5x4, _t1s2); _w3n = vadd_s32(_s5x4n, _t1s2n); _w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1); _w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1); _w3n = vmul_s32(_w3n, _tp0); // _w1, _w1n _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); _t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0); _t1s2n = vmul_s32(_t1s2n, _tp0); int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n); int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n); // transpose w to w_t #if __aarch64__ int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2); int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2); int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3); int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3); int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2)); int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3)); _d0 = vreinterpretq_s32_s64(_dt0); _d1 = vreinterpretq_s32_s64(_dt1); _d2 = vreinterpretq_s32_s64(_dt2); _d3 = vreinterpretq_s32_s64(_dt3); _d4 = vtrn1q_s32(_w02n, _w13n); _d5 = vtrn2q_s32(_w02n, _w13n); #else asm volatile( "vtrn.32 %q[_w0], %q[_w1] \n" "vtrn.32 %q[_w2], %q[_w3] \n" "vswp %f[_w0], %e[_w2] \n" "vswp %f[_w1], %e[_w3] \n" "vtrn.32 %q[_w02n], %q[_w13n] \n" : [_w0] "+w"(_w0), [_w1] "+w"(_t1s2), [_w2] "+w"(_t1p2), [_w3] "+w"(_w3), [_w02n] "+w"(_w02n), [_w13n] "+w"(_w13n) : : "cc", "memory"); _d0 = _w0; _d1 = _t1s2; _d2 = _t1p2; _d3 = _w3; _d4 = _w02n; _d5 = _w13n; #endif // Y = A_T * w_t _t1p2 = vaddq_s32(_d1, _d2); _t3p4 = vaddq_s32(_d3, _d4); _t1s2 = vsubq_s32(_d1, _d2); _t3s4 = vsubq_s32(_d3, _d4); _o0 = vaddq_s32(_d0, _t1p2); _o0 = vaddq_s32(_o0, _t3p4); // _o2 _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1); _o3 = vaddq_s32(_d5, _t1s2); _o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1); // _o1 _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0); // save to top blob tm float32x4_t _scale0 = vdupq_n_f32(scale0); float32x4_t _out0_f32 = 
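// Transpose note: on aarch64 the 4x6 block is transposed with vtrn1q/vtrn2q
// on 32-bit and then 64-bit elements; the armv7 path expresses the same
// shuffle as vtrn.32 + vswp in inline asm, since the d-register swap is
// awkward to write with intrinsics there.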
vdupq_n_f32(bias0); float32x4_t _out1_f32 = vdupq_n_f32(bias0); float32x4_t _out2_f32 = vdupq_n_f32(bias0); float32x4_t _out3_f32 = vdupq_n_f32(bias0); _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0); _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_t1s2), _scale0); _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_t1p2), _scale0); _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0); vst1q_f32(outRow0, _out0_f32); vst1q_f32(outRow1, _out1_f32); vst1q_f32(outRow2, _out2_f32); vst1q_f32(outRow3, _out3_f32); #else int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; int w0[6], w1[6], w2[6], w3[6]; int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; int o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 5; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]; } for (int n = 5; n < 6; n++) { w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]); w1[n] = 4 * (s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]); w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]); w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]); } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = (float)o0[n] * scale0 + bias0; outRow1[n] = (float)o1[n] * scale0 + bias0; outRow2[n] = (float)o2[n] * scale0 + bias0; outRow3[n] = (float)o3[n] * scale0 + bias0; } #endif // __ARM_NEON out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw * 3; outRow1 += outw * 3; outRow2 += outw * 3; outRow3 += outw * 3; } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8, (size_t)1u); const signed char* kernel = _kernel; int p = 0; for (; p + 7 < outch; p += 8) { const signed char* k0 = kernel + (p + 0) * inch * 9; const signed char* k1 = kernel + (p + 1) * inch * 9; const signed char* k2 = kernel + (p + 2) * inch * 9; const signed char* k3 = kernel + (p + 3) * inch * 9; const signed char* k4 = kernel + (p + 4) * inch * 9; const signed char* k5 = kernel + (p + 5) * inch * 9; const signed char* k6 = kernel + (p + 6) * inch * 9; const signed char* k7 = kernel + (p + 7) * inch * 9; signed char* ktmp = kernel_tm.channel(p / 8); for (int q = 0; q < inch; q++) { for (int k = 0; k < 9; k++) { ktmp[0] = k0[k]; ktmp[1] = k1[k]; ktmp[2] = k2[k]; ktmp[3] = k3[k]; ktmp[4] = k4[k]; ktmp[5] = k5[k]; ktmp[6] = k6[k]; ktmp[7] = k7[k]; ktmp += 8; } k0 += 9; k1 += 
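// Packing note: for the stride-2 path the 3x3 kernels of eight consecutive
// output channels are interleaved tap by tap (k0[k], k1[k], ..., k7[k]), so
// the convolution loop can load the eight weights of one tap with a single
// vld1.s8 and broadcast each one with a v.h[lane] multiply.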
9; k2 += 9; k3 += 9; k4 += 9; k5 += 9; k6 += 9; k7 += 9; } } for (; p < outch; p++) { const signed char* k0 = kernel + (p + 0) * inch * 9; signed char* ktmp = kernel_tm.channel(p / 8 + p % 8); for (int q = 0; q < inch; q++) { for (int k = 0; k < 9; k++) { ktmp[k] = k0[k]; } ktmp += 9; k0 += 9; } } } static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; int nn_outch = outch >> 3; int remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p + 0); Mat out1 = top_blob.channel(p + 1); Mat out2 = top_blob.channel(p + 2); Mat out3 = top_blob.channel(p + 3); Mat out4 = top_blob.channel(p + 4); Mat out5 = top_blob.channel(p + 5); Mat out6 = top_blob.channel(p + 6); Mat out7 = top_blob.channel(p + 7); out0.fill(0); out1.fill(0); out2.fill(0); out3.fill(0); out4.fill(0); out5.fill(0); out6.fill(0); out7.fill(0); const signed char* ktmp = _kernel.channel(p / 8); for (int q = 0; q < inch; q++) { int* outptr0 = out0; int* outptr1 = out1; int* outptr2 = out2; int* outptr3 = out3; int* outptr4 = out4; int* outptr5 = out5; int* outptr6 = out6; int* outptr7 = out7; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp "ld2 {v3.8b, v4.8b}, [%9], #16 \n" //r0-r2 "ld2 {v5.8b, v6.8b}, [%9] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" //out0 "ld1 {v10.4s, v11.4s}, [%2] \n" //out1 "ld1 {v12.4s, v13.4s}, [%3] \n" //out2 "ld1 {v14.4s, v15.4s}, [%4] \n" //out3 "ld1 {v16.4s, v17.4s}, [%5] \n" //out4 "ld1 {v18.4s, v19.4s}, [%6] \n" //out5 "ld1 {v20.4s, v21.4s}, [%7] \n" //out6 "ld1 {v22.4s, v23.4s}, [%8] \n" //out7 "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n" //(k00-k70) "sshll v1.8h, v1.8b, #0 \n" //(k01-k71) "sshll v2.8h, v2.8b, #0 \n" //(k02-k72) "sshll v3.8h, v3.8b, #0 \n" // r0 "sshll v4.8h, v4.8b, #0 \n" // r1 "sshll v7.8h, v7.8b, #0 \n" // r2 // r0 "smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r00-r07)*k00 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r00-r07)*k10 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r00-r07)*k20 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r00-r07)*k30 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r00-r07)*k40 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r00-r07)*k50 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r00-r07)*k60 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r00-r07)*k70 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r1 "smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r10-r17)*k01 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r10-r17)*k11 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r10-r17)*k21 "smlal2 
v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r10-r17)*k31 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r10-r17)*k41 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r10-r17)*k51 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r10-r17)*k61 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r10-r17)*k71 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r2 "smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r20-r27)*k02 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r20-r27)*k12 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r20-r27)*k22 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r20-r27)*k32 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r20-r27)*k42 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r20-r27)*k52 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r20-r27)*k62 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r20-r27)*k72 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n" //r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n" //(k03-k73) "sshll v1.8h, v1.8b, #0 \n" //(k04-k74) "sshll v2.8h, v2.8b, #0 \n" //(k05-k75) "sshll v3.8h, v3.8b, #0 \n" // r3 "sshll v4.8h, v4.8b, #0 \n" // r4 "sshll v7.8h, v7.8b, #0 \n" // r5 // r3 "smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r30-r37)*k03 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r30-r37)*k13 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r30-r37)*k23 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r30-r37)*k33 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r30-r37)*k43 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r30-r37)*k53 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r30-r37)*k63 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r30-r37)*k73 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r4 "smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r40-r47)*k04 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r40-r47)*k14 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r40-r47)*k24 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r40-r47)*k34 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r40-r47)*k44 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r40-r47)*k54 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r40-r47)*k64 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r40-r47)*k74 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r5 "smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r50-r57)*k05 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r50-r57)*k15 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r50-r57)*k25 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r50-r57)*k35 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] 
\n" // out4 += (r50-r57)*k45 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r50-r57)*k55 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r50-r57)*k65 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r50-r57)*k75 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n" //r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n" //(k06-k76) "sshll v1.8h, v1.8b, #0 \n" //(k07-k77) "sshll v2.8h, v2.8b, #0 \n" //(k08-k78) "sshll v3.8h, v3.8b, #0 \n" // r6 "sshll v4.8h, v4.8b, #0 \n" // r7 "sshll v7.8h, v7.8b, #0 \n" // r8 // r6 "smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r60-r67)*k06 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r60-r67)*k16 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r60-r67)*k26 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r60-r67)*k36 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r60-r67)*k46 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r60-r67)*k56 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r60-r67)*k66 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r60-r67)*k76 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r7 "smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r70-r77)*k07 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r70-r77)*k17 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r70-r77)*k27 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r70-r77)*k37 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r70-r77)*k47 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r70-r77)*k57 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r70-r77)*k67 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r70-r77)*k77 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r8 "smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r80-r87)*k08 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r80-r87)*k18 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r80-r87)*k28 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r80-r87)*k38 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r80-r87)*k48 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r80-r87)*k58 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r80-r87)*k68 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r80-r87)*k78 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "st1 {v16.4s, v17.4s}, [%5], #32 \n" "st1 {v18.4s, v19.4s}, [%6], #32 \n" "st1 {v20.4s, v21.4s}, [%7], #32 \n" "st1 {v22.4s, v23.4s}, [%8], #32 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n" // reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 
"=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.s32 {d16-d17}, [%1] \n" // out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n" // out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n" // out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n" // out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n" // d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! \n" // d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n" // out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n" // out5 "vmovl.s8 q2, d2 \n" // q2(k02-k72) "vmovl.s8 q1, d1 \n" // q1(k01-k71) "vmovl.s8 q0, d0 \n" // q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n" // d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n" // out6 "vmovl.s8 q5, d9 \n" // q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n" // q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n" // q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n" // out7 "vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a01-a07) * k01 "vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a01-a07) * k31 "vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a01-a07) * k71 "pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n" // d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n" // d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n" // d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n" // q2(k05-k75) "vmovl.s8 q1, d1 \n" // q1(k04-k74) "vmovl.s8 q0, d0 \n" // q0(k03-k73) "vmovl.s8 q5, d9 \n" // q5(a11-a115) "vmovl.s8 q4, d8 \n" // q4(a10-a114) "vmovl.s8 q6, d12 \n" // q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n" // d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215) "add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n" // d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n" // d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n" // q2(k08-k78) "vmovl.s8 q1, d1 \n" // q1(k07-k77) "vmovl.s8 q0, d0 \n" // q0(k06-k76) "vmovl.s8 q5, d9 \n" // q5(a21-a215) "vmovl.s8 q4, d8 \n" // q4(a20-a214) "vmovl.s8 q6, d12 \n" // q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a21-a27) * k57 "vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a22-a28) * k38 "vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! \n" // out0 "vst1.s32 {d18-d19}, [%2]! \n" // out1 "vst1.s32 {d20-d21}, [%3]! \n" // out2 "vst1.s32 {d22-d23}, [%4]! \n" // out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n" // out4 "vst1.s32 {d26-d27}, [%6]! \n" // out5 "vst1.s32 {d28-d29}, [%7]! \n" // out6 "vst1.s32 {d30-d31}, [%8]! \n" // out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0_s8 = vld1_s8(r0); // (a00 a01 a02 ....) int8x8_t _r1_s8 = vld1_s8(r1); // (a10 a11 a12 ....) int8x8_t _r2_s8 = vld1_s8(r2); // (a20 a21 a22 ....) 
int16x8_t _r0 = vmovl_s8(_r0_s8); int16x8_t _r1 = vmovl_s8(_r1_s8); int16x8_t _r2 = vmovl_s8(_r2_s8); int32x4_t _sum03, _sum47; _sum03 = vld1q_lane_s32(outptr0, _sum03, 0); // out0 _sum03 = vld1q_lane_s32(outptr1, _sum03, 1); // out1 _sum03 = vld1q_lane_s32(outptr2, _sum03, 2); // out2 _sum03 = vld1q_lane_s32(outptr3, _sum03, 3); // out3 _sum47 = vld1q_lane_s32(outptr4, _sum47, 0); // out4 _sum47 = vld1q_lane_s32(outptr5, _sum47, 1); // out5 _sum47 = vld1q_lane_s32(outptr6, _sum47, 2); // out6 _sum47 = vld1q_lane_s32(outptr7, _sum47, 3); // out7 // k0 - k2 int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1_8 = vld1_s8(ktmp + 8); //(k01-k71) int8x8_t _k2_8 = vld1_s8(ktmp + 16); //(k02-k72) int16x8_t _k0 = vmovl_s8(_k0_8); int16x8_t _k1 = vmovl_s8(_k1_8); int16x8_t _k2 = vmovl_s8(_k2_8); int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0); int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0); int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1); int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2); // k3 - k5 _k0_8 = vld1_s8(ktmp + 24); //(k03-k73) _k1_8 = vld1_s8(ktmp + 32); //(k04-k74) _k2_8 = vld1_s8(ktmp + 40); //(k05-k75) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2); // k6 - k8 _k0_8 = vld1_s8(ktmp + 48); //(k06-k76) _k1_8 = vld1_s8(ktmp + 56); //(k07-k77) _k2_8 = vld1_s8(ktmp + 64); //(k08-k78) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2); _sum0 = vaddq_s32(_sum0, _sum1); _sum0n = vaddq_s32(_sum0n, _sum1n); _sum03 = vaddq_s32(_sum03, _sum0); _sum47 = vaddq_s32(_sum47, _sum0n); vst1q_lane_s32(outptr0, _sum03, 0); vst1q_lane_s32(outptr1, _sum03, 1); vst1q_lane_s32(outptr2, _sum03, 2); vst1q_lane_s32(outptr3, _sum03, 3); vst1q_lane_s32(outptr4, _sum47, 0); vst1q_lane_s32(outptr5, _sum47, 1); vst1q_lane_s32(outptr6, _sum47, 2); vst1q_lane_s32(outptr7, _sum47, 3); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n" // d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n" // d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n" // d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! 
\n" // d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n" // d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n" // d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n" // d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n" // d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n" // d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n" // d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n" // out0 q10 "vld1.s32 {d20[1]}, [%1] \n" // out1 "vld1.s32 {d21[0]}, [%2] \n" // out2 "vld1.s32 {d21[1]}, [%3] \n" // out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! \n" "vmovl.s8 q14, d26 \n" // d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n" // d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n" // d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n" // out4 q11 "vld1.s32 {d22[1]}, [%5] \n" // out5 "vld1.s32 {d23[0]}, [%6] \n" // out6 "vld1.s32 {d23[1]}, [%7] \n" // out7 "vmull.s16 q6, d6, d0[0] \n" // a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n" // a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n" // a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n" // a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n" // a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n" // a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n" "vmovl.s8 q5, d8 \n" // d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n" // d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n" // d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n" // a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n" // a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n" // a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n" // a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n" // a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n" // a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n" // a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n" // a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n" // a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n" // a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n" // a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n" // a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n" // out0 "vst1.s32 {d20[1]}, [%1]! \n" // out1 "vst1.s32 {d21[0]}, [%2]! \n" // out2 "vst1.s32 {d21[1]}, [%3]! \n" // out3 "vst1.s32 {d22[0]}, [%4]! \n" // out4 "vst1.s32 {d22[1]}, [%5]! \n" // out5 "vst1.s32 {d23[0]}, [%6]! \n" // out6 "vst1.s32 {d23[1]}, [%7]! 
\n" // out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else // __ARM_NEON int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; sum0 += (int)r0[0] * ktmp[0]; sum1 += (int)r0[0] * ktmp[1]; sum2 += (int)r0[0] * ktmp[2]; sum3 += (int)r0[0] * ktmp[3]; sum4 += (int)r0[0] * ktmp[4]; sum5 += (int)r0[0] * ktmp[5]; sum6 += (int)r0[0] * ktmp[6]; sum7 += (int)r0[0] * ktmp[7]; ktmp += 8; sum0 += (int)r0[1] * ktmp[0]; sum1 += (int)r0[1] * ktmp[1]; sum2 += (int)r0[1] * ktmp[2]; sum3 += (int)r0[1] * ktmp[3]; sum4 += (int)r0[1] * ktmp[4]; sum5 += (int)r0[1] * ktmp[5]; sum6 += (int)r0[1] * ktmp[6]; sum7 += (int)r0[1] * ktmp[7]; ktmp += 8; sum0 += (int)r0[2] * ktmp[0]; sum1 += (int)r0[2] * ktmp[1]; sum2 += (int)r0[2] * ktmp[2]; sum3 += (int)r0[2] * ktmp[3]; sum4 += (int)r0[2] * ktmp[4]; sum5 += (int)r0[2] * ktmp[5]; sum6 += (int)r0[2] * ktmp[6]; sum7 += (int)r0[2] * ktmp[7]; ktmp += 8; sum0 += (int)r1[0] * ktmp[0]; sum1 += (int)r1[0] * ktmp[1]; sum2 += (int)r1[0] * ktmp[2]; sum3 += (int)r1[0] * ktmp[3]; sum4 += (int)r1[0] * ktmp[4]; sum5 += (int)r1[0] * ktmp[5]; sum6 += (int)r1[0] * ktmp[6]; sum7 += (int)r1[0] * ktmp[7]; ktmp += 8; sum0 += (int)r1[1] * ktmp[0]; sum1 += (int)r1[1] * ktmp[1]; sum2 += (int)r1[1] * ktmp[2]; sum3 += (int)r1[1] * ktmp[3]; sum4 += (int)r1[1] * ktmp[4]; sum5 += (int)r1[1] * ktmp[5]; sum6 += (int)r1[1] * ktmp[6]; sum7 += (int)r1[1] * ktmp[7]; ktmp += 8; sum0 += (int)r1[2] * ktmp[0]; sum1 += (int)r1[2] * ktmp[1]; sum2 += (int)r1[2] * ktmp[2]; sum3 += (int)r1[2] * ktmp[3]; sum4 += (int)r1[2] * ktmp[4]; sum5 += (int)r1[2] * ktmp[5]; sum6 += (int)r1[2] * ktmp[6]; sum7 += (int)r1[2] * ktmp[7]; ktmp += 8; sum0 += (int)r2[0] * ktmp[0]; sum1 += (int)r2[0] * ktmp[1]; sum2 += (int)r2[0] * ktmp[2]; sum3 += (int)r2[0] * ktmp[3]; sum4 += (int)r2[0] * ktmp[4]; sum5 += (int)r2[0] * ktmp[5]; sum6 += (int)r2[0] * ktmp[6]; sum7 += (int)r2[0] * ktmp[7]; ktmp += 8; sum0 += (int)r2[1] * ktmp[0]; sum1 += (int)r2[1] * ktmp[1]; sum2 += (int)r2[1] * ktmp[2]; sum3 += (int)r2[1] * ktmp[3]; sum4 += (int)r2[1] * ktmp[4]; sum5 += (int)r2[1] * ktmp[5]; sum6 += (int)r2[1] * ktmp[6]; sum7 += (int)r2[1] * ktmp[7]; ktmp += 8; sum0 += (int)r2[2] * ktmp[0]; sum1 += (int)r2[2] * ktmp[1]; sum2 += (int)r2[2] * ktmp[2]; sum3 += (int)r2[2] * ktmp[3]; sum4 += (int)r2[2] * ktmp[4]; sum5 += (int)r2[2] * ktmp[5]; sum6 += (int)r2[2] * ktmp[6]; sum7 += (int)r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8 * 9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8 * 9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p / 8 + p % 8); for (int q = 0; q < inch; q++) { int* outptr = out; 
const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n" //ktmp "ld2 {v2.8b, v3.8b}, [%2], #16 \n" //r0-r2 "ld2 {v4.8b, v5.8b}, [%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n" //r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n" //r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.4s, v15.4s}, [%1] \n" //out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n" //(k0-k7) "sshll v1.8h, v1.8b, #0 \n" //(k8) "sshll v2.8h, v2.8b, #0 \n" // r0 "sshll v3.8h, v3.8b, #0 \n" // r1 "sshll v4.8h, v4.8b, #0 \n" // r2 "sshll v6.8h, v6.8b, #0 \n" // r3 "sshll v7.8h, v7.8b, #0 \n" // r4 "sshll v8.8h, v8.8b, #0 \n" // r5 "sshll v10.8h, v10.8b, #0 \n" // r6 "sshll v11.8h, v11.8b, #0 \n" // r7 "sshll v12.8h, v12.8b, #0 \n" // r8 // r0 "smull v16.4s, v2.4h, v0.h[0] \n" // out = r0*k0 "smull2 v17.4s, v2.8h, v0.h[0] \n" "smull v18.4s, v3.4h, v0.h[1] \n" // outn = r1*k1 "smull2 v19.4s, v3.8h, v0.h[1] \n" "smlal v16.4s, v4.4h, v0.h[2] \n" // out = r2*k2 "smlal2 v17.4s, v4.8h, v0.h[2] \n" "smlal v18.4s, v6.4h, v0.h[3] \n" // outn = r3*k3 "smlal2 v19.4s, v6.8h, v0.h[3] \n" "smlal v16.4s, v7.4h, v0.h[4] \n" // out = r4*k4 "smlal2 v17.4s, v7.8h, v0.h[4] \n" "smlal v18.4s, v8.4h, v0.h[5] \n" // outn = r5*k5 "smlal2 v19.4s, v8.8h, v0.h[5] \n" "smlal v16.4s, v10.4h, v0.h[6] \n" // out = r6*k6 "smlal2 v17.4s, v10.8h, v0.h[6] \n" "smlal v18.4s, v11.4h, v0.h[7] \n" // outn = r7*k7 "smlal2 v19.4s, v11.8h, v0.h[7] \n" "smlal v16.4s, v12.4h, v1.h[0] \n" // out = r8*k8 "smlal2 v17.4s, v12.8h, v1.h[0] \n" "add v8.4s, v16.4s, v18.4s \n" "add v9.4s, v17.4s, v19.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n" // d0(k0 - k7) d1(k8 ...) "vmovl.s8 q1, d1 \n" // d2(k8 ...) "vmovl.s8 q0, d0 \n" // d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n" // r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n" // d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n" // r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n" // d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n" // r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n" // d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n" // q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n" // d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n" // d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n" // d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n" // q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n" // q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n" // q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n" // q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n" // q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n" // q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n" // q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n" // q8(a20 a22 ... 
a214) "vmovl.s8 q10, d20 \n" // q10(a22 a24 ... a216) "vmlal.s16 q11, d4, d0[0] \n" // k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n" // k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n" // k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n" // k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n" // k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n" // k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n" // k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n" // k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n" // k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp + 8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8); int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8); int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8); #endif for (; remain > 0; remain--) { #if __ARM_NEON int8x8_t _r00s8 = vld1_s8(r0); int8x8_t _r10s8 = vld1_s8(r1); int8x8_t _r20s8 = vld1_s8(r2); int16x8_t _r00s16 = vmovl_s8(_r00s8); int16x8_t _r10s16 = vmovl_s8(_r10s8); int16x8_t _r20s16 = vmovl_s8(_r20s8); int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16)); _sum = vsetq_lane_s32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_s32(_sum); #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); *outptr = vget_lane_s32(_ss, 0); #endif // __aarch64__ #else int sum = 0; sum += (int)r0[0] * ktmp[0]; sum += (int)r0[1] * ktmp[1]; sum += (int)r0[2] * ktmp[2]; sum += (int)r1[0] * ktmp[3]; sum += (int)r1[1] * ktmp[4]; sum += (int)r1[2] * ktmp[5]; sum += (int)r2[0] * ktmp[6]; sum += (int)r2[1] * ktmp[7]; sum += (int)r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } }
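/* Editorial note: a minimal sketch of the weight packing that the ktmp loads
 * above imply. It is inferred from the "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24"
 * sequences (24 bytes = 3 taps x 8 output channels) and the "sub %12, %12, #72"
 * reset, not copied from the original packing routine: per input channel, the
 * 3x3 kernels of 8 consecutive output channels are interleaved tap-major,
 * 9 taps x 8 channels = 72 int8 weights, which is also why the outer loop
 * advances ktmp by 8 * 9 per input channel. The [outch][inch][9] source layout
 * is an assumption. */
static void pack_kernel_8oc_sketch(const signed char* kernel, /* assumed [outch][inch][9] */
                                   signed char* ktmp, int inch, int oc0 /* first of 8 output channels */)
{
    for (int q = 0; q < inch; q++)
        for (int tap = 0; tap < 9; tap++)
            for (int oc = 0; oc < 8; oc++)
                ktmp[(q * 9 + tap) * 8 + oc] = kernel[((oc0 + oc) * inch + q) * 9 + tap];
}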
3d7pt.c
/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h> /* for omp_get_max_threads() below */
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* 0 => no work when arguments are missing */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] =
                alpha * (A[t%2][i][j][k])
                + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k]
                        + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k]
                        + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (commented out: freeing here causes performance
  // degradation)
  /*
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
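/* Editorial sketch: the time loop above is deliberately serial -- it is the
 * input that PLUTO-style source-to-source tools tile (using tile_size[]) and
 * parallelize. A minimal hand-parallelized single time step, assuming OpenMP,
 * could look like this; each (i,j,k) writes only out[i][j][k] and reads only
 * `in`, so the spatial loops carry no dependence: */
static void step_7pt_omp(double ***out, double ***in,
                         int Nz, int Ny, int Nx, double alpha, double beta)
{
  int i, j, k;
#pragma omp parallel for private(j, k)
  for (i = 1; i < Nz - 1; i++)
    for (j = 1; j < Ny - 1; j++)
      for (k = 1; k < Nx - 1; k++)
        out[i][j][k] = alpha * in[i][j][k]
            + beta * (in[i-1][j][k] + in[i][j-1][k] + in[i][j][k-1]
                    + in[i+1][j][k] + in[i][j+1][k] + in[i][j][k+1]);
}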
fixed_size_vector.h
// ----------------------------------------------------------------------------- // // Copyright (C) The BioDynaMo Project. // All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_CONTAINER_FIXED_SIZE_VECTOR_H_ #define CORE_CONTAINER_FIXED_SIZE_VECTOR_H_ #include <cassert> #include <cstddef> namespace bdm { /// Vector with fixed number of elements == Array with push_back function that /// keeps track of its size /// NB: No bounds checking. Do not push_back more often than the number of /// maximum elements given by the template parameter N template <typename T, std::size_t N> class FixedSizeVector { public: size_t size() const { return size_; } // NOLINT const T& operator[](size_t idx) const { return data_[idx]; } T& operator[](size_t idx) { return data_[idx]; } FixedSizeVector& operator++() { #pragma omp simd for (size_t i = 0; i < N; i++) { ++data_[i]; } return *this; } void clear() { size_ = 0; } // NOLINT void push_back(const T& value) { // NOLINT assert(size_ < N); data_[size_++] = value; } const T* begin() const { return &(data_[0]); } // NOLINT const T* end() const { return &(data_[size_]); } // NOLINT T* begin() { return &(data_[0]); } // NOLINT T* end() { return &(data_[size_]); } // NOLINT private: T data_[N]; std::size_t size_ = 0; }; } // namespace bdm #endif // CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
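// Editorial usage sketch (assumes C++11 or later). Note two quirks worth
// knowing: push_back has no bounds check beyond the assert, and operator++
// increments all N slots -- including those past size() -- so it is intended
// for vectors whose payload occupies the full capacity.
#include "core/container/fixed_size_vector.h"

int main() {
  bdm::FixedSizeVector<int, 4> v;
  v.push_back(10);
  v.push_back(20);           // size() == 2, capacity N == 4
  int sum = 0;
  for (int x : v) sum += x;  // begin()/end() enable range-for over size() elements
  ++v;                       // every one of the 4 slots is incremented
  return (sum == 30 && v[0] == 11) ? 0 : 1;
}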
pointer2Array.c
// Array types in a parameter list have to be converted to the corresponding
// pointer types to avoid segmentation faults.
// Kernel is extracted from cg of the NPB 2.3 OpenMP C benchmarks.

// NPB's cg indexes colidx from 1, so the loop below touches indices 1..100;
// the array therefore needs 101 slots (with 100 it writes one past the end).
static int colidx[101];

static void makea (int colidx[])
{
  int i;
#pragma omp parallel for private(i)
  for (i = 1; i <= 100; i++)
    colidx[i] = 0;
}

int main()
{
  makea(colidx);
  return 0;
}
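/* Editorial sketch of the rule the comment above refers to: an array type in
 * a parameter list is adjusted to a pointer type, so sizeof inside the callee
 * sees a pointer and the declared bound (if any) is ignored. */
#include <stdio.h>

static void takes_array(int a[100])  /* adjusted by the compiler to: int *a */
{
    printf("in callee: sizeof(a) = %zu\n", sizeof(a));  /* sizeof(int *)    */
}

static void decay_demo(void)
{
    int a[100];
    printf("in caller: sizeof(a) = %zu\n", sizeof(a));  /* 100 * sizeof(int) */
    takes_array(a);  /* the array argument decays to &a[0] */
}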
omp_host_pinned_memory.c
// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda // REQUIRES: nvptx64-nvidia-cuda #include <omp.h> #include <stdio.h> // Allocate pinned memory on the host void *llvm_omp_target_alloc_host(size_t, int); int main() { const int N = 64; const int device = omp_get_default_device(); const int host = omp_get_initial_device(); int *hst_ptr = llvm_omp_target_alloc_host(N * sizeof(int), device); for (int i = 0; i < N; ++i) hst_ptr[i] = 2; #pragma omp target teams distribute parallel for device(device) \ map(tofrom:hst_ptr[0 : N]) for (int i = 0; i < N; ++i) hst_ptr[i] -= 1; int sum = 0; for (int i = 0; i < N; ++i) sum += hst_ptr[i]; omp_target_free(hst_ptr, device); // CHECK: PASS if (sum == N) printf ("PASS\n"); }
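/* Editorial sketch: a standard (OpenMP 5.x) route to pinned host memory via
 * allocator traits, as opposed to the LLVM-specific entry point used above.
 * Whether the pinned trait is actually honored depends on the runtime. */
#include <omp.h>
#include <stddef.h>

static int *alloc_pinned_ints(size_t n)
{
  omp_alloctrait_t traits[1] = {{omp_atk_pinned, omp_atv_true}};
  omp_allocator_handle_t pinned_alloc =
      omp_init_allocator(omp_default_mem_space, 1, traits);
  return (int *)omp_alloc(n * sizeof(int), pinned_alloc);
  /* pair with omp_free(ptr, pinned_alloc), then omp_destroy_allocator */
}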
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-4, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* 0 => no work when arguments are missing */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // tiled and parallelized execution (PLUTO/CLooG-generated code follows)
/* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) { for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(8*t3+Nx-5,64));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(64*t4,4*t5+4); ubv=min(64*t4+63,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
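/* Editorial sketch: the plain (untiled, serial) update that the generated
 * CLooG nest above computes. The stencil is radius-4 and axis-symmetric:
 * coef[0] weights the center point, and for each radius r = 1..4 one
 * coefficient per axis is shared by the -r and +r neighbors. */
static void step_25pt_reference(double ****A, double ****coef, int t,
                                int Nz, int Ny, int Nx)
{
  int i, j, k, r;
  for (i = 4; i < Nz - 4; i++)
    for (j = 4; j < Ny - 4; j++)
      for (k = 4; k < Nx - 4; k++) {
        double v = coef[0][i][j][k] * A[t % 2][i][j][k];
        for (r = 1; r <= 4; r++) {
          v += coef[3*r - 2][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
          v += coef[3*r - 1][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
          v += coef[3*r    ][i][j][k] * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
        }
        A[(t + 1) % 2][i][j][k] = v;
      }
}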
DRB007-indirectaccess3-orig-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the disclaimer (as noted below) in the
documentation and/or other materials provided with the distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
Two pointers have a distance of 12 (xa2 - xa1 = 12). They are used as base
addresses for indirect array accesses using an index set (another array).

The index set contains two indices with the same distance of 12:
indexSet[3] - indexSet[0] = 533 - 521 = 12,
so iterations i=0 and i=3 carry a dependence (xa2[521] and xa1[533] alias).

We use the default loop scheduling (static even) in OpenMP. It is possible
that the two dependent iterations are scheduled within the same chunk to the
same thread; in that case no data race manifests at runtime. N is 180 and
only iterations i=0 and i=3 are dependent, so under static even scheduling we
need at least 60 threads (180/60 = 3 iterations per chunk) for iterations 0
and 3 to be scheduled to two different threads.

Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define N 180

int indexSet[N] = {
  521, 523, 525, 533, 529, 531, // 521+12=533
  547, 549, 551, 553, 555, 557,
  573, 575, 577, 579, 581, 583,
  599, 601, 603, 605, 607, 609,
  625, 627, 629, 631, 633, 635,
  651, 653, 655, 657, 659, 661,
  859, 861, 863, 865, 867, 869,
  885, 887, 889, 891, 893, 895,
  911, 913, 915, 917, 919, 921,
  937, 939, 941, 943, 945, 947,
  963, 965, 967, 969, 971, 973,
  989, 991, 993, 995, 997, 999,
  1197, 1199, 1201, 1203, 1205, 1207,
  1223, 1225, 1227, 1229, 1231, 1233,
  1249, 1251, 1253, 1255, 1257, 1259,
  1275, 1277, 1279, 1281, 1283, 1285,
  1301, 1303, 1305, 1307, 1309, 1311,
  1327, 1329, 1331, 1333, 1335, 1337,
  1535, 1537, 1539, 1541, 1543, 1545,
  1561, 1563, 1565, 1567, 1569, 1571,
  1587, 1589, 1591, 1593, 1595, 1597,
  1613, 1615, 1617, 1619, 1621, 1623,
  1639, 1641, 1643, 1645, 1647, 1649,
  1665, 1667, 1669, 1671, 1673, 1675,
  1873, 1875, 1877, 1879, 1881, 1883,
  1899, 1901, 1903, 1905, 1907, 1909,
  1925, 1927, 1929, 1931, 1933, 1935,
  1951, 1953, 1955, 1957, 1959, 1961,
  1977, 1979, 1981, 1983, 1985, 1987,
  2003, 2005, 2007, 2009, 2011, 2013};

int main (int argc, char* argv[])
{
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0) {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }

  double * xa1 = base;
  double * xa2 = xa1 + 12;
  int i;

  // initialize the segments touched by indexSet
#pragma omp parallel for
  for (i = 521; i <= 2025; ++i) {
    base[i] = 0.5*i;
  }

  // the racy loop: xa1[idx] and xa2[idx] overlap for the dependent iterations
#pragma omp parallel for
  for (i = 0; i < N; ++i) {
    int idx = indexSet[i];
    xa1[idx] += 1.0;
    xa2[idx] += 3.0;
  }

  printf("xa1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
  free (base);
  return 0;
}
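/* Editorial sketch: how to locate the conflicting pair described in the
 * comment block above. Since xa2 == xa1 + 12, iterations i and j collide
 * whenever indexSet[j] == indexSet[i] + 12, because xa2[indexSet[i]] and
 * xa1[indexSet[j]] then name the same element of base. For the table above
 * this reports exactly i=0, j=3 (521 -> 533). */
static void find_conflicts(const int *idx, int n)
{
  int i, j;
  for (i = 0; i < n; ++i)
    for (j = 0; j < n; ++j)
      if (idx[j] == idx[i] + 12)
        printf("iterations %d and %d both touch base[%d]\n", i, j, idx[j]);
}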
GrB_Matrix_serializeSize.c
//------------------------------------------------------------------------------ // GrB_Matrix_serializeSize: return an upper bound on the blob size //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // GrB_Matrix_serialize and GxB_Matrix_serialize both serialize a GrB_Matrix // into a blob of bytes. This function provides an estimate of the # of bytes // the blob would have, assuming the default method and default # of threads, // using the dryrun option in GB_serialize. #include "GB.h" #include "GB_serialize.h" GrB_Info GrB_Matrix_serializeSize // estimate the size of a blob ( // output: GrB_Index *blob_size_handle, // upper bound on the required size of the // blob on output. // input: GrB_Matrix A // matrix to serialize ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Matrix_serializeSize (&blob_size, A)") ; GB_BURBLE_START ("GrB_Matrix_serialize") ; GB_RETURN_IF_NULL (blob_size_handle) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; // no descriptor, so assume the default method int method = GxB_DEFAULT ; // Context will hold the default # of threads, which can be controlled // by GxB_Global_Option_set. //-------------------------------------------------------------------------- // serialize the matrix //-------------------------------------------------------------------------- size_t blob_size ; GrB_Info info = GB_serialize (NULL, &blob_size, A, method, Context) ; (*blob_size_handle) = (GrB_Index) blob_size ; GB_BURBLE_END ; #pragma omp flush return (info) ; }
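/* Editorial usage sketch (assumes the GraphBLAS v2.0 serialization API):
 * query the upper bound, allocate a blob of that size, then serialize; on
 * output GrB_Matrix_serialize shrinks blob_size to the bytes actually used. */
#include <stdlib.h>

static GrB_Info serialize_with_bound   // serialize A into a freshly malloc'd blob
(
    GrB_Matrix A,
    void **blob_handle,                // output: the blob (caller frees)
    GrB_Index *blob_size_handle        // output: actual size of the blob
)
{
    GrB_Index blob_size ;
    GrB_Info info = GrB_Matrix_serializeSize (&blob_size, A) ;
    if (info != GrB_SUCCESS) return (info) ;
    void *blob = malloc (blob_size) ;  // upper bound: may overallocate
    if (blob == NULL) return (GrB_OUT_OF_MEMORY) ;
    info = GrB_Matrix_serialize (blob, &blob_size, A) ;
    (*blob_handle) = blob ;
    (*blob_size_handle) = blob_size ;
    return (info) ;
}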
GB_binop__isge_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_int8) // A.*B function (eWiseMult): GB (_AemultB_08__isge_int8) // A.*B function (eWiseMult): GB (_AemultB_02__isge_int8) // A.*B function (eWiseMult): GB (_AemultB_04__isge_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int8) // A*D function (colscale): GB (_AxD__isge_int8) // D*A function (rowscale): GB (_DxB__isge_int8) // C+=B function (dense accum): GB (_Cdense_accumB__isge_int8) // C+=b function (dense accum): GB (_Cdense_accumb__isge_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int8) // C=scalar+B GB (_bind1st__isge_int8) // C=scalar+B' GB (_bind1st_tran__isge_int8) // C=A+scalar GB (_bind2nd__isge_int8) // C=A'+scalar GB (_bind2nd_tran__isge_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_INT8 || GxB_NO_ISGE_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
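The generated bind1st/bind2nd kernels above differ only in the operand order of the comparison, which matters because ISGE ("is greater or equal") is not commutative. The following standalone C sketch, with hypothetical function names and no GraphBLAS dependencies, illustrates the two orderings on a small dense array; it mirrors the kernels' arithmetic and is not part of the generated file.

/* Minimal sketch of the ISGE int8 bind1st/bind2nd semantics above.
 * Function names are hypothetical; the generated kernels also handle
 * bitmap masks (Bb/Ab) and OpenMP, omitted here for clarity. */
#include <stdint.h>
#include <stdio.h>

/* bind1st: c[p] = (x >= a[p]) */
static void isge_bind1st_int8(int8_t *c, int8_t x, const int8_t *a, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) c [p] = (int8_t) (x >= a [p]) ;
}

/* bind2nd: c[p] = (a[p] >= y) */
static void isge_bind2nd_int8(int8_t *c, const int8_t *a, int8_t y, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) c [p] = (int8_t) (a [p] >= y) ;
}

int main(void)
{
    int8_t a [4] = { -2, 0, 3, 5 } ;
    int8_t c1 [4], c2 [4] ;
    isge_bind1st_int8 (c1, 3, a, 4) ;   /* 3 >= a[p]  ->  1 1 1 0 */
    isge_bind2nd_int8 (c2, a, 3, 4) ;   /* a[p] >= 3  ->  0 0 1 1 */
    for (int p = 0 ; p < 4 ; p++) printf ("%d %d\n", c1 [p], c2 [p]) ;
    return 0 ;
}

The two results differ on every entry where a[p] != 3, which is why a non-commutative operator like ISGE needs the flipped variant handled explicitly in the _AemultB_02 kernel above.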
NavierStokesSolver_impl.h
// Copyright (c) 2004-2022 Tomáš Oberhuber et al. // // This file is part of TNL - Template Numerical Library (https://tnl-project.org/) // // SPDX-License-Identifier: MIT #pragma once #include <noa/3rdparty/tnl-noa/src/TNL/Solvers/cfd/navier-stokes/NavierStokesSolver.h> namespace noa::TNL { template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::NavierStokesSolver() : advection( 0 ), u1Viscosity( 0 ), u2Viscosity( 0 ), energyViscosity( 0 ), mu( 0.0 ), gravity( 0.0 ), R( 0.0 ), T( 0.0 ) {} template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setAdvectionScheme( AdvectionSchemeType& advection ) { this->advection = &advection; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setDiffusionScheme( DiffusionSchemeType& u1Viscosity, DiffusionSchemeType& u2Viscosity, DiffusionSchemeType& energyViscosity ) { this->u1Viscosity = &u1Viscosity; this->u2Viscosity = &u2Viscosity; this->energyViscosity = &energyViscosity; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setBoundaryConditions( BoundaryConditionsType& boundaryConditions ) { this->boundaryConditions = &boundaryConditions; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setMesh( MeshType& mesh ) { this->mesh = &mesh; this->rho.setSize( this->mesh->getDofs() ); this->u1.setSize( this->mesh->getDofs() ); this->u2.setSize( this->mesh->getDofs() ); this->u.setSize( this->mesh->getDofs() ); this->p.setSize( this->mesh->getDofs() ); this->energy.setSize( this->mesh->getDofs() ); this->rhsDofVector.setSize( this->getDofs() ); } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setMu( const RealType& mu ) { this->mu = mu; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::RealType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getMu() const { return this->mu; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setR( const RealType& R ) { this->R = R; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::RealType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getR() const { return this->R; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setT( const RealType& T ) { this->T = T; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions 
>::RealType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getT() const { return this->T; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setHeatCapacityRatio( const RealType& gamma ) { this->gamma = gamma; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::RealType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getHeatCapacityRatio() const { return this->gamma; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::setGravity( const RealType& gravity ) { this->gravity = gravity; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::RealType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getGravity() const { return this->gravity; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getRho() { return this->rho; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getRho() const { return this->rho; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getU1() { return this->u1; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getU1() const { return this->u1; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getU2() { return this->u2; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getU2() const { return this->u2; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getU() { return this->u; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< 
AdvectionScheme, DiffusionScheme, BoundaryConditions >::getU() const { return this->u; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getPressure() { return this->p; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getPressure() const { return this->p; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getEnergy() { return this->energy; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > const typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::VectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getEnergy() const { return this->energy; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::IndexType NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getDofs() const { return 4 * this->mesh->getDofs(); } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::bindDofVector( RealType* data ) { this->dofVector.bind( data, this->getDofs() ); } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::DofVectorType& NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getDofVector() { return this->dofVector; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > template< typename Vector > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::updatePhysicalQuantities( const Vector& dofs_rho, const Vector& dofs_rho_u1, const Vector& dofs_rho_u2, const Vector& dofs_e ) { if( DeviceType::getDevice() == Devices::HostDevice ) { const IndexType size = dofs_rho.getSize(); #ifdef HAVE_OPENMP #pragma omp parallel for if( Devices::Host::isOMPEnabled() ) #endif for( IndexType c = 0; c < size; c++ ) { this->rho[ c ] = dofs_rho[ c ]; const RealType u1 = this->u1[ c ] = dofs_rho_u1[ c ] / dofs_rho[ c ]; const RealType u2 = this->u2[ c ] = dofs_rho_u2[ c ] / dofs_rho[ c ]; this->u[ c ] = ::sqrt( u1 * u1 + u2 * u2 ); // this->p[ c ] = dofs_rho[ c ] * this->R * this->T; this->p[ c ] = ( this->gamma - 1.0 ) * ( dofs_e[ c ] - 0.5 * this->rho[ c ] * ( this->u1[ c ] * this->u1[ c ] + this->u2[ c ] * this->u2[ c ] ) ); this->energy[ c ] = dofs_e[ c ]; // this->temperature[ c ] = this->p[ c ] / ( this->rho[ c ] * this->R ); } } } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > template< typename SolverVectorType > void NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::getExplicitUpdate( const RealType& time, const RealType&
tau, SolverVectorType& u, SolverVectorType& fu ) { TNL_ASSERT_TRUE( this->advection, "advection scheme was not set" ); TNL_ASSERT_TRUE( this->u1Viscosity, "diffusion scheme was not set" ); TNL_ASSERT_TRUE( this->u2Viscosity, "diffusion scheme was not set" ); TNL_ASSERT_TRUE( this->boundaryConditions, "boundary conditions were not set" ); SharedVector< RealType, DeviceType, IndexType > dofs_rho, dofs_rho_u1, dofs_rho_u2, dofs_e, rho_t, rho_u1_t, rho_u2_t, e_t; const IndexType& dofs = this->mesh->getDofs(); dofs_rho.bind( &u.getData()[ 0 ], dofs ); dofs_rho_u1.bind( &u.getData()[ dofs ], dofs ); dofs_rho_u2.bind( &u.getData()[ 2 * dofs ], dofs ); dofs_e.bind( &u.getData()[ 3 * dofs ], dofs ); this->advection->setRho( dofs_rho ); this->advection->setRhoU1( dofs_rho_u1 ); this->advection->setRhoU2( dofs_rho_u2 ); this->advection->setE( dofs_e ); this->advection->setP( this->p ); this->energyViscosity->setFunction( this->energy ); rho_t.bind( &fu.getData()[ 0 ], dofs ); rho_u1_t.bind( &fu.getData()[ dofs ], dofs ); rho_u2_t.bind( &fu.getData()[ 2 * dofs ], dofs ); e_t.bind( &fu.getData()[ 3 * dofs ], dofs ); updatePhysicalQuantities( dofs_rho, dofs_rho_u1, dofs_rho_u2, dofs_e ); this->boundaryConditions->apply( time, tau, this->rho, this->u1, this->u2, this->energy ); const IndexType& xSize = this->mesh->getDimensions().x(); const IndexType& ySize = this->mesh->getDimensions().y(); if( DeviceType::getDevice() == Devices::HostDevice ) { for( IndexType i = 0; i < xSize; i++ ) { const IndexType c1 = mesh->getElementIndex( i, 0 ); const IndexType c2 = mesh->getElementIndex( i, 1 ); const IndexType c3 = mesh->getElementIndex( i, ySize - 1 ); const IndexType c4 = mesh->getElementIndex( i, ySize - 2 ); dofs_rho[ c1 ] = this->rho[ c1 ]; dofs_rho_u1[ c1 ] = this->rho[ c1 ] * this->u1[ c1 ]; dofs_rho_u2[ c1 ] = this->rho[ c1 ] * this->u2[ c1 ]; dofs_e[ c1 ] = this->energy[ c1 ]; /*dofs_e[ c1 ] = this->computeEnergy( this->rho[ c1 ], this->temperature[ c1 ], this->gamma, this->u1[ c1 ], this->u2[ c1 ] );*/ dofs_rho[ c3 ] = this->rho[ c3 ]; dofs_rho_u1[ c3 ] = this->rho[ c3 ] * this->u1[ c3 ]; dofs_rho_u2[ c3 ] = this->rho[ c3 ] * this->u2[ c3 ]; dofs_e[ c3 ] = this->energy[ c3 ]; /*dofs_e[ c3 ] = this->computeEnergy( this->rho[ c3 ], this->temperature[ c3 ], this->gamma, this->u1[ c3 ], this->u2[ c3 ] );*/ } for( IndexType j = 0; j < ySize; j++ ) { const IndexType c1 = mesh->getElementIndex( 0, j ); const IndexType c2 = mesh->getElementIndex( 1, j ); const IndexType c3 = mesh->getElementIndex( xSize - 1, j ); const IndexType c4 = mesh->getElementIndex( xSize - 2, j ); dofs_rho[ c1 ] = this->rho[ c1 ]; dofs_rho_u1[ c1 ] = this->rho[ c1 ] * this->u1[ c1 ]; dofs_rho_u2[ c1 ] = this->rho[ c1 ] * this->u2[ c1 ]; dofs_e[ c1 ] = this->energy[ c1 ]; /*dofs_e[ c1 ] = this->computeEnergy( this->rho[ c1 ], this->temperature[ c1 ], this->gamma, this->u1[ c1 ], this->u2[ c1 ] );*/ dofs_rho[ c3 ] = this->rho[ c3 ]; dofs_rho_u1[ c3 ] = this->rho[ c3 ] * this->u1[ c3 ]; dofs_rho_u2[ c3 ] = this->rho[ c3 ] * this->u2[ c3 ]; dofs_e[ c3 ] = this->energy[ c3 ]; /*dofs_e[ c3 ] = this->computeEnergy( this->rho[ c3 ], this->temperature[ c3 ], this->gamma, this->u1[ c3 ], this->u2[ c3 ] );*/ } } writePhysicalVariables( time, -4 ); #ifdef HAVE_OPENMP #pragma omp parallel for if( Devices::Host::isOMPEnabled() ) #endif for( IndexType j = 0; j < ySize; j++ ) for( IndexType i = 0; i < xSize; i++ ) { IndexType c = this->mesh->getElementIndex( i, j ); if( i == 0 || j == 0 || i == xSize - 1 || j == ySize - 1 ) { rho_t[ c ] = rho_u1_t[
c ] = rho_u2_t[ c ] = e_t[ c ] = 0.0; continue; } this->advection->getExplicitUpdate( c, rho_t[ c ], rho_u1_t[ c ], rho_u2_t[ c ], e_t[ c ], tau ); // rho_u1_t[ c ] += ; // rho_u2_t[ c ] -= startUpCoefficient * this->gravity * this->rho[ c ]; /*** * Add the viscosity term */ /*rho_u1_t[ c ] += this->mu*( u1Viscosity->getDiffusion( c, 4.0/3.0, 1.0, 0.0 ) + u2Viscosity->getDiffusion( c, 0.0, 0.0, 1.0/3.0 ) ); rho_u2_t[ c ] += this->mu*( u2Viscosity->getDiffusion( c, 1.0, 4.0/3.0, 0.0 ) + u1Viscosity->getDiffusion( c, 0.0, 0.0, 1.0/3.0 ) ); RealType k = 2.495*pow( 400.0, 1.5 ) / ( 400.0 + 194.0 ); e_t[ c ] += this->mu*( u1Viscosity->getDiffusion( c, this->u1, this->u1, this->u2, 4.0/3.0, 1.0, -2.0/3.0 ) + u1Viscosity->getDiffusion( c, this->u1, this->u1, this->u2, 0.0, 0.0, 1.0 ) + u2Viscosity->getDiffusion( c, this->u2, this->u2, this->u1, 1.0, 4.0/3.0, -2.0/3.0 ) + u2Viscosity->getDiffusion( c, this->u2, this->u2, this->u1, 0.0, 0.0, 1.0 ) + k * energyViscosity->getDiffusion( c, 1.0, 1.0, 0.0 ) ); */ rho_u1_t[ c ] += this->mu * ( u1Viscosity->getDiffusion( c, 1.0, 1.0, 0.0 ) ); rho_u2_t[ c ] += this->mu * ( u2Viscosity->getDiffusion( c, 1.0, 1.0, 0.0 ) ); RealType k = 2.495 * pow( 400.0, 1.5 ) / ( 400.0 + 194.0 ); // cout << k << std::endl; /*e_t[ c ] += this->mu*( u1Viscosity->getDiffusion( c, this->u1, this->u1, this->u2, 1.0, 1.0, 0.0 ) + u2Viscosity->getDiffusion( c, this->u2, this->u2, this->u1, 1.0, 1.0, -0.0 ) );*/ // energyViscosity->getDiffusion( c, 1.0, 1.0, 0.0 ) ); // e_t[ c ] = 0.0; } } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > bool NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::writePhysicalVariables( const RealType& t, const IndexType step ) { SharedVector< RealType, DeviceType, IndexType > dofs_rho, dofs_rho_u1, dofs_rho_u2, dofs_e; const IndexType& dofs = mesh->getDofs(); dofs_rho.bind( &dofVector.getData()[ 0 ], dofs ); dofs_rho_u1.bind( &dofVector.getData()[ dofs ], dofs ); dofs_rho_u2.bind( &dofVector.getData()[ 2 * dofs ], dofs ); dofs_e.bind( &dofVector.getData()[ 3 * dofs ], dofs ); this->updatePhysicalQuantities( dofs_rho, dofs_rho_u1, dofs_rho_u2, dofs_e ); Vector< StaticVector< 2, RealType >, DeviceType, IndexType > u; u.setLike( u1 ); String fileName; for( IndexType i = 0; i < this->u1.getSize(); i++ ) u[ i ] = StaticVector< 2, RealType >( this->u1[ i ], this->u2[ i ] ); FileNameBaseNumberEnding( "u-", step, 5, ".tnl", fileName ); if( ! u.save( fileName ) ) return false; FileNameBaseNumberEnding( "rho-", step, 5, ".tnl", fileName ); if( ! this->rho.save( fileName ) ) return false; FileNameBaseNumberEnding( "p-", step, 5, ".tnl", fileName ); if( ! this->p.save( fileName ) ) return false; FileNameBaseNumberEnding( "e-", step, 5, ".tnl", fileName ); if( ! this->energy.save( fileName ) ) return false; return true; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > bool NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::writeConservativeVariables( const RealType& t, const IndexType step ) { SharedVector< RealType, DeviceType, IndexType > dofs_rho, dofs_rho_u1, dofs_rho_u2, dofs_e; const IndexType& dofs = mesh->getDofs(); dofs_rho.bind( &dofVector.getData()[ 0 ], dofs ); dofs_rho_u1.bind( &dofVector.getData()[ dofs ], dofs ); dofs_rho_u2.bind( &dofVector.getData()[ 2 * dofs ], dofs ); dofs_e.bind( &dofVector.getData()[ 3 * dofs ], dofs ); String fileName; FileNameBaseNumberEnding( "rho-", step, 5, ".tnl", fileName ); if( ! 
dofs_rho.save( fileName ) ) return false; FileNameBaseNumberEnding( "rho-u1-", step, 5, ".tnl", fileName ); if( ! dofs_rho_u1.save( fileName ) ) return false; FileNameBaseNumberEnding( "rho-u2-", step, 5, ".tnl", fileName ); if( ! dofs_rho_u2.save( fileName ) ) return false; FileNameBaseNumberEnding( "e-", step, 5, ".tnl", fileName ); if( ! dofs_e.save( fileName ) ) return false; return true; } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > typename NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::RealType NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::computeEnergy( const RealType& rho, const RealType& pressure, const RealType& gamma, const RealType& u1, const RealType& u2 ) const { /*return rho * this->R * temperature / ( gamma - 1.0 ) + 0.5 * rho * ( u1*u1 + u2*u2 );*/ return pressure / ( gamma - 1.0 ) + 0.5 * rho * ( u1 * u1 + u2 * u2 ); } template< typename AdvectionScheme, typename DiffusionScheme, typename BoundaryConditions > template< typename DofVector > bool NavierStokesSolver< AdvectionScheme, DiffusionScheme, BoundaryConditions >::writeExplicitRhs( const RealType& t, const IndexType step, DofVector& rhs ) { SharedVector< RealType, DeviceType, IndexType > dofs_rho, dofs_rho_u1, dofs_rho_u2, dofs_e; const IndexType& dofs = mesh->getDofs(); dofs_rho.bind( &rhs.getData()[ 0 ], dofs ); dofs_rho_u1.bind( &rhs.getData()[ dofs ], dofs ); dofs_rho_u2.bind( &rhs.getData()[ 2 * dofs ], dofs ); dofs_e.bind( &rhs.getData()[ 3 * dofs ], dofs ); String fileName; FileNameBaseNumberEnding( "rho-t-", step, 5, ".tnl", fileName ); if( ! dofs_rho.save( fileName ) ) return false; FileNameBaseNumberEnding( "rho-u1-t-", step, 5, ".tnl", fileName ); if( ! dofs_rho_u1.save( fileName ) ) return false; FileNameBaseNumberEnding( "rho-u2-t-", step, 5, ".tnl", fileName ); if( ! dofs_rho_u2.save( fileName ) ) return false; FileNameBaseNumberEnding( "e-t-", step, 5, ".tnl", fileName ); if( ! dofs_e.save( fileName ) ) return false; return true; } } // namespace noa::TNL
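updatePhysicalQuantities recovers the pressure from the conservative variables via p = ( gamma - 1 ) * ( e - 0.5 * rho * ( u1^2 + u2^2 ) ), and computeEnergy applies the inverse relation e = p / ( gamma - 1 ) + 0.5 * rho * ( u1^2 + u2^2 ). Below is a minimal standalone C sketch of this round trip, assuming an ideal gas; the helper names and test values are illustrative and not taken from TNL.

/* Round trip between conservative energy and pressure, ideal gas. */
#include <assert.h>
#include <math.h>
#include <stdio.h>

/* p = (gamma - 1) * (e - 0.5*rho*(u1^2 + u2^2)), as in updatePhysicalQuantities */
static double pressure(double rho, double u1, double u2, double e, double gamma)
{
    return ( gamma - 1.0 ) * ( e - 0.5 * rho * ( u1 * u1 + u2 * u2 ) );
}

/* e = p/(gamma - 1) + 0.5*rho*(u1^2 + u2^2), as in computeEnergy */
static double energy(double rho, double p, double gamma, double u1, double u2)
{
    return p / ( gamma - 1.0 ) + 0.5 * rho * ( u1 * u1 + u2 * u2 );
}

int main(void)
{
    const double gamma = 1.4;   /* heat capacity ratio for air */
    const double rho = 1.2, u1 = 10.0, u2 = -3.0, e = 2.5e5;  /* arbitrary state */
    const double p = pressure( rho, u1, u2, e, gamma );
    /* the two relations are exact inverses, so the round trip recovers e */
    assert( fabs( energy( rho, p, gamma, u1, u2 ) - e ) < 1e-9 * e );
    printf( "p = %g\n", p );
    return 0;
}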
matrixmultiply2.c
/* Naive matrix-matrix multiplication (mmm), by C. Liao */ #define N 1000 #define M 1000 #define K 1000 int i,j,k; double a[N][M],b[M][K],c[N][K]; int mmm() { //#pragma omp parallel for private(i,j,k) shared(a,b,c) for (i = 0; i < N; i++) for (k = 0; k < M; k++) /* k runs over a's columns and b's rows (M) */ for (j = 0; j < K; j++) /* j runs over b's and c's columns (K) */ c[i][j] = c[i][j] + a[i][k]*b[k][j]; return 0; }
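The OpenMP pragma in mmm() is commented out; because i, j, and k are globals, enabling it without the private(i,j,k) clause would race on the loop indices. The following sketch shows a safe parallel variant under a hypothetical name, mmm_omp, with the loop indices declared locally so they are private by default.

/* compile with: cc -O2 -fopenmp mmm_omp.c */
#define N 1000
#define M 1000
#define K 1000

static double a[N][M], b[M][K], c[N][K];   /* file-scope arrays are zero-initialized */

int mmm_omp(void)
{
    /* Indices are declared inside the loops, so each thread gets its own
       copies without needing an explicit private(i,j,k) clause. */
    #pragma omp parallel for
    for (int i = 0; i < N; i++)
        for (int k = 0; k < M; k++)
            for (int j = 0; j < K; j++)
                c[i][j] += a[i][k] * b[k][j];
    return 0;
}

int main(void) { return mmm_omp(); }

The ikj loop order keeps the innermost accesses to b and c contiguous in memory, which typically vectorizes and caches better than the textbook ijk order.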
GB_binop__max_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__max_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__max_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__max_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint16) // A*D function (colscale): GB (_AxD__max_uint16) // D*A function (rowscale): GB (_DxB__max_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__max_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__max_uint16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint16) // C=scalar+B GB (_bind1st__max_uint16) // C=scalar+B' GB (_bind1st_tran__max_uint16) // C=A+scalar GB (_bind2nd__max_uint16) // C=A'+scalar GB (_bind2nd_tran__max_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_UINT16 || GxB_NO_MAX_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__max_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" 
GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
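For this operator the header sets GB_BINOP_FLIP to 0: max(x,y) equals max(y,x), so no flipped variant is needed, unlike the ISGE kernels earlier. A standalone C sketch of the dense element-wise maximum, using the same parallel-for pattern as the generated bind1st/bind2nd kernels; the names are hypothetical and no GraphBLAS types are involved.

/* c[p] = max(a[p], b[p]) over dense uint16 arrays. */
#include <stdint.h>
#include <stdio.h>

#define IMAX(x,y) (((x) > (y)) ? (x) : (y))   /* mirrors GB_IMAX's integer max */

static void ewise_max_uint16(uint16_t *c, const uint16_t *a, const uint16_t *b,
                             int64_t n, int nthreads)
{
    int64_t p ;
    /* the loop index is implicitly private, as in the generated kernels */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < n ; p++) c [p] = IMAX (a [p], b [p]) ;
}

int main(void)
{
    uint16_t a [4] = { 1, 7, 3, 9 }, b [4] = { 4, 2, 8, 9 }, c [4] ;
    ewise_max_uint16 (c, a, b, 4, 2) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%d\n", (int) c [p]) ;  /* 4 7 8 9 */
    return 0 ;
}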
UMESimdVecFloatPrototype.h
// The MIT License (MIT) // // Copyright (c) 2015-2017 CERN // // Author: Przemyslaw Karpinski // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // // // This piece of code was developed as part of ICE-DIP project at CERN. // "ICE-DIP is a European Industrial Doctorate project funded by the European Community's // 7th Framework programme Marie Curie Actions under grant PITN-GA-2012-316596". // #ifndef UME_SIMD_VEC_FLOAT_PROTOTYPE_H_ #define UME_SIMD_VEC_FLOAT_PROTOTYPE_H_ #include <type_traits> #include "../../../UMESimdInterface.h" #include "../UMESimdMask.h" #include "../UMESimdSwizzle.h" #include "../UMESimdVecUint.h" namespace UME { namespace SIMD { template<typename SCALAR_FLOAT_TYPE, uint32_t VEC_LEN> struct SIMDVec_f_traits { // Generic trait class not containing type definition so that only correct explicit // type definitions are compiled correctly }; // 32b vectors template<> struct SIMDVec_f_traits<float, 1> { typedef NullType<1> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint32_t, 1> VEC_UINT_TYPE; typedef SIMDVec_i<int32_t, 1> VEC_INT_TYPE; typedef int32_t SCALAR_INT_TYPE; typedef uint32_t SCALAR_UINT_TYPE; typedef float* SCALAR_TYPE_PTR; typedef SIMDVecMask<1> MASK_TYPE; typedef SIMDSwizzle<1> SWIZZLE_MASK_TYPE; typedef NullType<2> SCALAR_FLOAT_LOWER_PRECISION; typedef double SCALAR_FLOAT_HIGHER_PRECISION; }; // 64b vectors template<> struct SIMDVec_f_traits<float, 2> { typedef SIMDVec_f<float, 1> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint32_t, 2> VEC_UINT_TYPE; typedef SIMDVec_i<int32_t, 2> VEC_INT_TYPE; typedef int32_t SCALAR_INT_TYPE; typedef uint32_t SCALAR_UINT_TYPE; typedef float* SCALAR_TYPE_PTR; typedef SIMDVecMask<2> MASK_TYPE; typedef SIMDSwizzle<2> SWIZZLE_MASK_TYPE; typedef NullType<2> SCALAR_FLOAT_LOWER_PRECISION; typedef double SCALAR_FLOAT_HIGHER_PRECISION; }; template<> struct SIMDVec_f_traits<double, 1> { typedef NullType<1> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint64_t, 1> VEC_UINT_TYPE; typedef SIMDVec_i<int64_t, 1> VEC_INT_TYPE; typedef int64_t SCALAR_INT_TYPE; typedef uint64_t SCALAR_UINT_TYPE; typedef double* SCALAR_TYPE_PTR; typedef SIMDVecMask<1> MASK_TYPE; typedef SIMDSwizzle<1> SWIZZLE_MASK_TYPE; typedef float SCALAR_FLOAT_LOWER_PRECISION; typedef NullType<2> SCALAR_FLOAT_HIGHER_PRECISION; }; // 128b vectors template<> struct SIMDVec_f_traits<float, 4> { typedef SIMDVec_f<float, 2> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint32_t, 4> VEC_UINT_TYPE; typedef SIMDVec_i<int32_t, 4> VEC_INT_TYPE; typedef int32_t SCALAR_INT_TYPE; typedef uint32_t 
SCALAR_UINT_TYPE; typedef float* SCALAR_TYPE_PTR; typedef SIMDVecMask<4> MASK_TYPE; typedef SIMDSwizzle<4> SWIZZLE_MASK_TYPE; typedef NullType<2> SCALAR_FLOAT_LOWER_PRECISION; typedef double SCALAR_FLOAT_HIGHER_PRECISION; }; template<> struct SIMDVec_f_traits<double, 2> { typedef SIMDVec_f<double, 1> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint64_t, 2> VEC_UINT_TYPE; typedef SIMDVec_i<int64_t, 2> VEC_INT_TYPE; typedef int64_t SCALAR_INT_TYPE; typedef uint64_t SCALAR_UINT_TYPE; typedef double* SCALAR_TYPE_PTR; typedef SIMDVecMask<2> MASK_TYPE; typedef SIMDSwizzle<2> SWIZZLE_MASK_TYPE; typedef float SCALAR_FLOAT_LOWER_PRECISION; typedef NullType<2> SCALAR_FLOAT_HIGHER_PRECISION; }; // 256b vectors template<> struct SIMDVec_f_traits<float, 8> { typedef SIMDVec_f<float, 4> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint32_t, 8> VEC_UINT_TYPE; typedef SIMDVec_i<int32_t, 8> VEC_INT_TYPE; typedef int32_t SCALAR_INT_TYPE; typedef uint32_t SCALAR_UINT_TYPE; typedef float* SCALAR_TYPE_PTR; typedef SIMDVecMask<8> MASK_TYPE; typedef SIMDSwizzle<8> SWIZZLE_MASK_TYPE; typedef NullType<2> SCALAR_FLOAT_LOWER_PRECISION; typedef double SCALAR_FLOAT_HIGHER_PRECISION; }; template<> struct SIMDVec_f_traits<double, 4> { typedef SIMDVec_f<double, 2> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint64_t, 4> VEC_UINT_TYPE; typedef SIMDVec_i<int64_t, 4> VEC_INT_TYPE; typedef int64_t SCALAR_INT_TYPE; typedef uint64_t SCALAR_UINT_TYPE; typedef double* SCALAR_TYPE_PTR; typedef SIMDVecMask<4> MASK_TYPE; typedef SIMDSwizzle<4> SWIZZLE_MASK_TYPE; typedef float SCALAR_FLOAT_LOWER_PRECISION; typedef NullType<2> SCALAR_FLOAT_HIGHER_PRECISION; }; // 512b vectors template<> struct SIMDVec_f_traits<float, 16> { typedef SIMDVec_f<float, 8> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint32_t, 16> VEC_UINT_TYPE; typedef SIMDVec_i<int32_t, 16> VEC_INT_TYPE; typedef int32_t SCALAR_INT_TYPE; typedef uint32_t SCALAR_UINT_TYPE; typedef float* SCALAR_TYPE_PTR; typedef SIMDVecMask<16> MASK_TYPE; typedef SIMDSwizzle<16> SWIZZLE_MASK_TYPE; typedef NullType<2> SCALAR_FLOAT_LOWER_PRECISION; typedef double SCALAR_FLOAT_HIGHER_PRECISION; }; template<> struct SIMDVec_f_traits<double, 8> { typedef SIMDVec_f<double, 4> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint64_t, 8> VEC_UINT_TYPE; typedef SIMDVec_i<int64_t, 8> VEC_INT_TYPE; typedef int64_t SCALAR_INT_TYPE; typedef uint64_t SCALAR_UINT_TYPE; typedef double* SCALAR_TYPE_PTR; typedef SIMDVecMask<8> MASK_TYPE; typedef SIMDSwizzle<8> SWIZZLE_MASK_TYPE; typedef float SCALAR_FLOAT_LOWER_PRECISION; typedef NullType<2> SCALAR_FLOAT_HIGHER_PRECISION; }; // 1024b vectors template<> struct SIMDVec_f_traits<float, 32> { typedef SIMDVec_f<float, 16> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint32_t, 32> VEC_UINT_TYPE; typedef SIMDVec_i<int32_t, 32> VEC_INT_TYPE; typedef int32_t SCALAR_INT_TYPE; typedef uint32_t SCALAR_UINT_TYPE; typedef float* SCALAR_TYPE_PTR; typedef SIMDVecMask<32> MASK_TYPE; typedef SIMDSwizzle<32> SWIZZLE_MASK_TYPE; typedef NullType<2> SCALAR_FLOAT_LOWER_PRECISION; typedef NullType<3> SCALAR_FLOAT_HIGHER_PRECISION; }; template<> struct SIMDVec_f_traits<double, 16> { typedef SIMDVec_f<double, 8> HALF_LEN_VEC_TYPE; typedef SIMDVec_u<uint64_t, 16> VEC_UINT_TYPE; typedef SIMDVec_i<int64_t, 16> VEC_INT_TYPE; typedef int64_t SCALAR_INT_TYPE; typedef uint64_t SCALAR_UINT_TYPE; typedef double* SCALAR_TYPE_PTR; typedef SIMDVecMask<16> MASK_TYPE; typedef SIMDSwizzle<16> SWIZZLE_MASK_TYPE; typedef float SCALAR_FLOAT_LOWER_PRECISION; typedef NullType<2> SCALAR_FLOAT_HIGHER_PRECISION; }; //
*************************************************************************** // * // * Implementation of floating point types SIMDx_32f and SIMDx_64f. // * // * This implementation uses scalar emulation available through to // * SIMDVecFloatInterface. // * // *************************************************************************** template<typename SCALAR_FLOAT_TYPE, uint32_t VEC_LEN> class SIMDVec_f : public SIMDVecFloatInterface< SIMDVec_f<SCALAR_FLOAT_TYPE, VEC_LEN>, typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::VEC_UINT_TYPE, typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::VEC_INT_TYPE, SCALAR_FLOAT_TYPE, VEC_LEN, typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::SCALAR_UINT_TYPE, typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::SCALAR_INT_TYPE, typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::MASK_TYPE, typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::SWIZZLE_MASK_TYPE>, public SIMDVecPackableInterface< SIMDVec_f<SCALAR_FLOAT_TYPE, VEC_LEN>, typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::HALF_LEN_VEC_TYPE> { public: typedef SIMDVecEmuRegister<SCALAR_FLOAT_TYPE, VEC_LEN> VEC_EMU_REG; typedef typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::SCALAR_UINT_TYPE SCALAR_UINT_TYPE; typedef typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::SCALAR_INT_TYPE SCALAR_INT_TYPE; typedef SIMDVec_f VEC_TYPE; typedef typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::VEC_UINT_TYPE VEC_UINT_TYPE; typedef typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::VEC_INT_TYPE VEC_INT_TYPE; typedef typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::MASK_TYPE MASK_TYPE; typedef typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::SCALAR_FLOAT_LOWER_PRECISION SCALAR_FLOAT_LOWER_PRECISION; typedef typename SIMDVec_f_traits<SCALAR_FLOAT_TYPE, VEC_LEN>::SCALAR_FLOAT_HIGHER_PRECISION SCALAR_FLOAT_HIGHER_PRECISION; public: constexpr static uint32_t alignment() { return VEC_LEN*sizeof(SCALAR_FLOAT_TYPE); } public: // private: alignas(alignment()) SCALAR_FLOAT_TYPE mVec[VEC_LEN]; public: // ZERO-CONSTR UME_FORCE_INLINE SIMDVec_f() : mVec() {}; // SET-CONSTR UME_FORCE_INLINE SIMDVec_f(SCALAR_FLOAT_TYPE x) { SCALAR_FLOAT_TYPE *local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for (unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = x; } } // This constructor is used to force types other than SCALAR_TYPES // to be promoted to SCALAR_TYPE instead of SCALAR_TYPE*. This prevents // ambiguity between SET-CONSTR and LOAD-CONSTR. 
template<typename T> UME_FORCE_INLINE SIMDVec_f( T i, typename std::enable_if< std::is_fundamental<T>::value && !std::is_same<T, SCALAR_FLOAT_TYPE>::value, void*>::type = nullptr) : SIMDVec_f(static_cast<SCALAR_FLOAT_TYPE>(i)) {} // LOAD-CONSTR UME_FORCE_INLINE explicit SIMDVec_f(SCALAR_FLOAT_TYPE const * p) { this->load(p); } UME_FORCE_INLINE SIMDVec_f(SCALAR_FLOAT_TYPE f0, SCALAR_FLOAT_TYPE f1) { mVec[0] = f0; mVec[1] = f1; } UME_FORCE_INLINE SIMDVec_f( SCALAR_FLOAT_TYPE f0, SCALAR_FLOAT_TYPE f1, SCALAR_FLOAT_TYPE f2, SCALAR_FLOAT_TYPE f3) { mVec[0] = f0; mVec[1] = f1; mVec[2] = f2; mVec[3] = f3; } UME_FORCE_INLINE SIMDVec_f( SCALAR_FLOAT_TYPE f0, SCALAR_FLOAT_TYPE f1, SCALAR_FLOAT_TYPE f2, SCALAR_FLOAT_TYPE f3, SCALAR_FLOAT_TYPE f4, SCALAR_FLOAT_TYPE f5, SCALAR_FLOAT_TYPE f6, SCALAR_FLOAT_TYPE f7) { mVec[0] = f0; mVec[1] = f1; mVec[2] = f2; mVec[3] = f3; mVec[4] = f4; mVec[5] = f5; mVec[6] = f6; mVec[7] = f7; } UME_FORCE_INLINE SIMDVec_f( SCALAR_FLOAT_TYPE f0, SCALAR_FLOAT_TYPE f1, SCALAR_FLOAT_TYPE f2, SCALAR_FLOAT_TYPE f3, SCALAR_FLOAT_TYPE f4, SCALAR_FLOAT_TYPE f5, SCALAR_FLOAT_TYPE f6, SCALAR_FLOAT_TYPE f7, SCALAR_FLOAT_TYPE f8, SCALAR_FLOAT_TYPE f9, SCALAR_FLOAT_TYPE f10, SCALAR_FLOAT_TYPE f11, SCALAR_FLOAT_TYPE f12, SCALAR_FLOAT_TYPE f13, SCALAR_FLOAT_TYPE f14, SCALAR_FLOAT_TYPE f15) { mVec[0] = f0; mVec[1] = f1; mVec[2] = f2; mVec[3] = f3; mVec[4] = f4; mVec[5] = f5; mVec[6] = f6; mVec[7] = f7; mVec[8] = f8; mVec[9] = f9; mVec[10] = f10; mVec[11] = f11; mVec[12] = f12; mVec[13] = f13; mVec[14] = f14; mVec[15] = f15; } UME_FORCE_INLINE SIMDVec_f( SCALAR_FLOAT_TYPE f0, SCALAR_FLOAT_TYPE f1, SCALAR_FLOAT_TYPE f2, SCALAR_FLOAT_TYPE f3, SCALAR_FLOAT_TYPE f4, SCALAR_FLOAT_TYPE f5, SCALAR_FLOAT_TYPE f6, SCALAR_FLOAT_TYPE f7, SCALAR_FLOAT_TYPE f8, SCALAR_FLOAT_TYPE f9, SCALAR_FLOAT_TYPE f10, SCALAR_FLOAT_TYPE f11, SCALAR_FLOAT_TYPE f12, SCALAR_FLOAT_TYPE f13, SCALAR_FLOAT_TYPE f14, SCALAR_FLOAT_TYPE f15, SCALAR_FLOAT_TYPE f16, SCALAR_FLOAT_TYPE f17, SCALAR_FLOAT_TYPE f18, SCALAR_FLOAT_TYPE f19, SCALAR_FLOAT_TYPE f20, SCALAR_FLOAT_TYPE f21, SCALAR_FLOAT_TYPE f22, SCALAR_FLOAT_TYPE f23, SCALAR_FLOAT_TYPE f24, SCALAR_FLOAT_TYPE f25, SCALAR_FLOAT_TYPE f26, SCALAR_FLOAT_TYPE f27, SCALAR_FLOAT_TYPE f28, SCALAR_FLOAT_TYPE f29, SCALAR_FLOAT_TYPE f30, SCALAR_FLOAT_TYPE f31) { mVec[0] = f0; mVec[1] = f1; mVec[2] = f2; mVec[3] = f3; mVec[4] = f4; mVec[5] = f5; mVec[6] = f6; mVec[7] = f7; mVec[8] = f8; mVec[9] = f9; mVec[10] = f10; mVec[11] = f11; mVec[12] = f12; mVec[13] = f13; mVec[14] = f14; mVec[15] = f15; mVec[16] = f16; mVec[17] = f17; mVec[18] = f18; mVec[19] = f19; mVec[20] = f20; mVec[21] = f21; mVec[22] = f22; mVec[23] = f23; mVec[24] = f24; mVec[25] = f25; mVec[26] = f26; mVec[27] = f27; mVec[28] = f28; mVec[29] = f29; mVec[30] = f30; mVec[31] = f31; } // EXTRACT UME_FORCE_INLINE SCALAR_FLOAT_TYPE extract(uint32_t index) const { return mVec[index]; } UME_FORCE_INLINE SCALAR_FLOAT_TYPE operator[] (uint32_t index) const { return extract(index); } // INSERT UME_FORCE_INLINE SIMDVec_f & insert(uint32_t index, SCALAR_FLOAT_TYPE value) { mVec[index] = value; return *this; } UME_FORCE_INLINE IntermediateIndex<SIMDVec_f, SCALAR_FLOAT_TYPE> operator[] (uint32_t index) { return IntermediateIndex<SIMDVec_f, SCALAR_FLOAT_TYPE>(index, static_cast<SIMDVec_f &>(*this)); } // Override Mask Access operators #if defined(USE_PARENTHESES_IN_MASK_ASSIGNMENT) UME_FORCE_INLINE IntermediateMask<SIMDVec_f, SCALAR_FLOAT_TYPE, MASK_TYPE> operator() (MASK_TYPE const & mask) { return IntermediateMask<SIMDVec_f, 
SCALAR_FLOAT_TYPE, MASK_TYPE>(mask, static_cast<SIMDVec_f &>(*this)); } #else UME_FORCE_INLINE IntermediateMask<SIMDVec_f, SCALAR_FLOAT_TYPE, MASK_TYPE> operator[] (MASK_TYPE const & mask) { return IntermediateMask<SIMDVec_f, SCALAR_FLOAT_TYPE, MASK_TYPE>(mask, static_cast<SIMDVec_f &>(*this)); } #endif // ASSIGNV UME_FORCE_INLINE SIMDVec_f & assign(SIMDVec_f const & src) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_src_ptr = &src.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = local_src_ptr[i]; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator= (SIMDVec_f const & b) { return this->assign(b); } // MASSIGNV UME_FORCE_INLINE SIMDVec_f & assign(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & src) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_src_ptr = &src.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if (local_mask_ptr[i] == true) local_ptr[i] = local_src_ptr[i]; } return *this; } // ASSIGNS UME_FORCE_INLINE SIMDVec_f & assign(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = b; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator= (SCALAR_FLOAT_TYPE b) { return this->assign(b); } // MASSIGNS UME_FORCE_INLINE SIMDVec_f & assign(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if (local_mask_ptr[i] == true) local_ptr[i] = b; } return *this; } // PREFETCH0 // PREFETCH1 // PREFETCH2 // LOAD UME_FORCE_INLINE SIMDVec_f & load(SCALAR_FLOAT_TYPE const *p) { SCALAR_FLOAT_TYPE *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const *local_p_ptr = &p[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = local_p_ptr[i]; } return *this; } // MLOAD UME_FORCE_INLINE SIMDVec_f & load(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE const *p) { SCALAR_FLOAT_TYPE *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const *local_p_ptr = &p[0]; bool const *local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if (local_mask_ptr[i] == true) local_ptr[i] = local_p_ptr[i]; } return *this; } // LOADA UME_FORCE_INLINE SIMDVec_f & loada(SCALAR_FLOAT_TYPE const *p) { SCALAR_FLOAT_TYPE *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const *local_p_ptr = &p[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = local_p_ptr[i]; } return *this; } // MLOADA UME_FORCE_INLINE SIMDVec_f & loada(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE const *p) { SCALAR_FLOAT_TYPE *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const *local_p_ptr = &p[0]; bool const *local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if (local_mask_ptr[i] == true) local_ptr[i] = local_p_ptr[i]; } return *this; } // STORE UME_FORCE_INLINE SCALAR_FLOAT_TYPE* store(SCALAR_FLOAT_TYPE* p) const { SCALAR_FLOAT_TYPE const *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE *local_p_ptr = &p[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_p_ptr[i] = local_ptr[i]; } return p; } // MSTORE UME_FORCE_INLINE SCALAR_FLOAT_TYPE* store(SIMDVecMask<VEC_LEN> const & mask, 
SCALAR_FLOAT_TYPE* p) const { SCALAR_FLOAT_TYPE const *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE *local_p_ptr = &p[0]; bool const *local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if (local_mask_ptr[i] == true) local_p_ptr[i] = local_ptr[i]; } return p; } // STOREA UME_FORCE_INLINE SCALAR_FLOAT_TYPE* storea(SCALAR_FLOAT_TYPE* p) const { SCALAR_FLOAT_TYPE const *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE *local_p_ptr = &p[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_p_ptr[i] = local_ptr[i]; } return p; } // MSTOREA UME_FORCE_INLINE SCALAR_FLOAT_TYPE* storea(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE* p) const { SCALAR_FLOAT_TYPE const *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE *local_p_ptr = &p[0]; bool const *local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if (local_mask_ptr[i] == true) local_p_ptr[i] = local_ptr[i]; } return p; } // BLENDV UME_FORCE_INLINE SIMDVec_f blend(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE *retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const *local_b_ptr = &b.mVec[0]; bool const *local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) retval_ptr[i] = local_b_ptr[i]; else retval_ptr[i] = local_ptr[i]; } return retval; } // BLENDS UME_FORCE_INLINE SIMDVec_f blend(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE const *local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE *retval_ptr = &retval.mVec[0]; bool const *local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) retval_ptr[i] = b; else retval_ptr[i] = local_ptr[i]; } return retval; } // SWIZZLE // SWIZZLEA // ADDV UME_FORCE_INLINE SIMDVec_f add(SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] + local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVec_f operator+ (SIMDVec_f const & b) const { return add(b); } // MADDV UME_FORCE_INLINE SIMDVec_f add(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] + local_b_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // ADDS UME_FORCE_INLINE SIMDVec_f add(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] + b; } return retval; } UME_FORCE_INLINE SIMDVec_f operator+ (SCALAR_FLOAT_TYPE b) const { return add(b); } // MADDS UME_FORCE_INLINE SIMDVec_f add(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; 
SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] + b; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // ADDVA UME_FORCE_INLINE SIMDVec_f & adda(SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] += local_b_ptr[i]; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator+= (SIMDVec_f const & b) { return adda(b); } // MADDVA UME_FORCE_INLINE SIMDVec_f & adda(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] += local_b_ptr[i]; } return *this; } // ADDSA UME_FORCE_INLINE SIMDVec_f & adda(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] += b; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator+= (SCALAR_FLOAT_TYPE b) { return adda(b); } // MADDSA UME_FORCE_INLINE SIMDVec_f & adda(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] += b; } return *this; } // SADDV // MSADDV // SADDS // MSADDS // SADDVA // MSADDVA // SADDSA // MSADDSA // POSTINC UME_FORCE_INLINE SIMDVec_f postinc() { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i]++; } return retval; } UME_FORCE_INLINE SIMDVec_f operator++ (int) { return postinc(); } // MPOSTINC UME_FORCE_INLINE SIMDVec_f postinc(SIMDVecMask<VEC_LEN> const & mask) { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i]++; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // PREFINC UME_FORCE_INLINE SIMDVec_f & prefinc() { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { ++local_ptr[i]; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator++ () { return prefinc(); } // MPREFINC UME_FORCE_INLINE SIMDVec_f & prefinc(SIMDVecMask<VEC_LEN> const & mask) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) ++local_ptr[i]; } return *this; } // SUBV UME_FORCE_INLINE SIMDVec_f sub(SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; 
i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] - local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVec_f operator- (SIMDVec_f const & b) const { return sub(b); } // MSUBV UME_FORCE_INLINE SIMDVec_f sub(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] - local_b_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // SUBS UME_FORCE_INLINE SIMDVec_f sub(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] - b; } return retval; } UME_FORCE_INLINE SIMDVec_f operator- (SCALAR_FLOAT_TYPE b) const { return sub(b); } // MSUBS UME_FORCE_INLINE SIMDVec_f sub(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] - b; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // SUBVA UME_FORCE_INLINE SIMDVec_f & suba(SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] -= local_b_ptr[i]; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator-= (SIMDVec_f const & b) { return suba(b); } // MSUBVA UME_FORCE_INLINE SIMDVec_f & suba(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] -= local_b_ptr[i]; } return *this; } // SUBSA UME_FORCE_INLINE SIMDVec_f & suba(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] -= b; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator-= (SCALAR_FLOAT_TYPE b) { return suba(b); } // MSUBSA UME_FORCE_INLINE SIMDVec_f & suba(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] -= b; } return *this; } // SSUBV // MSSUBV // SSUBS // MSSUBS // SSUBVA // MSSUBVA // SSUBSA // MSSUBSA // SUBFROMV UME_FORCE_INLINE SIMDVec_f subfrom(SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_b_ptr[i] - local_ptr[i]; } return retval; } // MSUBFROMV UME_FORCE_INLINE SIMDVec_f 
subfrom(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_b_ptr[i] - local_ptr[i]; else local_retval_ptr[i] = local_b_ptr[i]; } return retval; } // SUBFROMS UME_FORCE_INLINE SIMDVec_f subfrom(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = b - local_ptr[i]; } return retval; } // MSUBFROMS UME_FORCE_INLINE SIMDVec_f subfrom(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = b - local_ptr[i]; else local_retval_ptr[i] = b; } return retval; } // SUBFROMVA UME_FORCE_INLINE SIMDVec_f & subfroma(SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = local_b_ptr[i] - local_ptr[i]; } return *this; } // MSUBFROMVA UME_FORCE_INLINE SIMDVec_f & subfroma(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] = local_b_ptr[i] - local_ptr[i]; else local_ptr[i] = local_b_ptr[i]; } return *this; } // SUBFROMSA UME_FORCE_INLINE SIMDVec_f & subfroma(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = b - local_ptr[i]; } return *this; } // MSUBFROMSA UME_FORCE_INLINE SIMDVec_f & subfroma(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] = b - local_ptr[i]; else local_ptr[i] = b; } return *this; } // POSTDEC UME_FORCE_INLINE SIMDVec_f postdec() { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i]--; } return retval; } UME_FORCE_INLINE SIMDVec_f operator-- (int) { return postdec(); } // MPOSTDEC UME_FORCE_INLINE SIMDVec_f postdec(SIMDVecMask<VEC_LEN> const & mask) { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i]--; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // PREFDEC 
UME_FORCE_INLINE SIMDVec_f & prefdec() { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { --local_ptr[i]; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator-- () { return prefdec(); } // MPREFDEC UME_FORCE_INLINE SIMDVec_f & prefdec(SIMDVecMask<VEC_LEN> const & mask) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) --local_ptr[i]; } return *this; } // MULV UME_FORCE_INLINE SIMDVec_f mul(SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVec_f operator* (SIMDVec_f const & b) const { return mul(b); } // MMULV UME_FORCE_INLINE SIMDVec_f mul(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // MULS UME_FORCE_INLINE SIMDVec_f mul(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] * b; } return retval; } UME_FORCE_INLINE SIMDVec_f operator* (SCALAR_FLOAT_TYPE b) const { return mul(b); } // MMULS UME_FORCE_INLINE SIMDVec_f mul(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * b; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // MULVA UME_FORCE_INLINE SIMDVec_f & mula(SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] *= local_b_ptr[i]; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator*= (SIMDVec_f const & b) { return mula(b); } // MMULVA UME_FORCE_INLINE SIMDVec_f & mula(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] *= local_b_ptr[i]; } return *this; } // MULSA UME_FORCE_INLINE SIMDVec_f & mula(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] *= b; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator*= (SCALAR_FLOAT_TYPE b) { return 
mula(b); } // MMULSA UME_FORCE_INLINE SIMDVec_f & mula(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] *= b; } return *this; } // DIVV UME_FORCE_INLINE SIMDVec_f div(SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] / local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVec_f operator/ (SIMDVec_f const & b) const { return div(b); } // MDIVV UME_FORCE_INLINE SIMDVec_f div(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] / local_b_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // DIVS UME_FORCE_INLINE SIMDVec_f div(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] / b; } return retval; } UME_FORCE_INLINE SIMDVec_f operator/ (SCALAR_FLOAT_TYPE b) const { return div(b); } // MDIVS UME_FORCE_INLINE SIMDVec_f div(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] / b; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // DIVVA UME_FORCE_INLINE SIMDVec_f & diva(SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] /= local_b_ptr[i]; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator/= (SIMDVec_f const & b) { return diva(b); } // MDIVVA UME_FORCE_INLINE SIMDVec_f & diva(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] /= local_b_ptr[i]; } return *this; } // DIVSA UME_FORCE_INLINE SIMDVec_f & diva(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] /= b; } return *this; } UME_FORCE_INLINE SIMDVec_f & operator/= (SCALAR_FLOAT_TYPE b) { return diva(b); } // MDIVSA UME_FORCE_INLINE SIMDVec_f & diva(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd 
safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] /= b; } return *this; } // RCP UME_FORCE_INLINE SIMDVec_f rcp() const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = SCALAR_FLOAT_TYPE(1.0f) / local_ptr[i]; } return retval; } // MRCP UME_FORCE_INLINE SIMDVec_f rcp(SIMDVecMask<VEC_LEN> const & mask) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = SCALAR_FLOAT_TYPE(1.0f) / local_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // RCPS UME_FORCE_INLINE SIMDVec_f rcp(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = b / local_ptr[i]; } return retval; } // MRCPS UME_FORCE_INLINE SIMDVec_f rcp(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = b / local_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // RCPA UME_FORCE_INLINE SIMDVec_f & rcpa() { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = SCALAR_FLOAT_TYPE(1.0f) / local_ptr[i]; } return *this; } // MRCPA UME_FORCE_INLINE SIMDVec_f & rcpa(SIMDVecMask<VEC_LEN> const & mask) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] = SCALAR_FLOAT_TYPE(1.0f) / local_ptr[i]; } return *this; } // RCPSA UME_FORCE_INLINE SIMDVec_f & rcpa(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = b / local_ptr[i]; } return *this; } // MRCPSA UME_FORCE_INLINE SIMDVec_f & rcpa(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] = b / local_ptr[i]; } return *this; } // CMPEQV UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpeq(SIMDVec_f const & b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] == local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator== (SIMDVec_f const & b) const { return cmpeq(b); } // CMPEQS UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpeq(SCALAR_FLOAT_TYPE b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE 
const * local_ptr = &mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] == b; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator== (SCALAR_FLOAT_TYPE b) const { return cmpeq(b); } // CMPNEV UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpne(SIMDVec_f const & b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] != local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator!= (SIMDVec_f const & b) const { return cmpne(b); } // CMPNES UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpne(SCALAR_FLOAT_TYPE b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] != b; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator!= (SCALAR_FLOAT_TYPE b) const { return cmpne(b); } // CMPGTV UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpgt(SIMDVec_f const & b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] > local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator> (SIMDVec_f const & b) const { return cmpgt(b); } // CMPGTS UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpgt(SCALAR_FLOAT_TYPE b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] > b; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator> (SCALAR_FLOAT_TYPE b) const { return cmpgt(b); } // CMPLTV UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmplt(SIMDVec_f const & b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] < local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator< (SIMDVec_f const & b) const { return cmplt(b); } // CMPLTS UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmplt(SCALAR_FLOAT_TYPE b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] < b; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator< (SCALAR_FLOAT_TYPE b) const { return cmplt(b); } // CMPGEV UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpge(SIMDVec_f const & b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] >= local_b_ptr[i]; } return retval; } 
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator>= (SIMDVec_f const & b) const { return cmpge(b); } // CMPGES UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpge(SCALAR_FLOAT_TYPE b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] >= b; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator>= (SCALAR_FLOAT_TYPE b) const { return cmpge(b); } // CMPLEV UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmple(SIMDVec_f const & b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] <= local_b_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator<= (SIMDVec_f const & b) const { return cmple(b); } // CMPLES UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmple(SCALAR_FLOAT_TYPE b) const { SIMDVecMask<VEC_LEN> retval; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool * local_retval_ptr = &retval.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] <= b; } return retval; } UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator<= (SCALAR_FLOAT_TYPE b) const { return cmple(b); } // CMPEV UME_FORCE_INLINE bool cmpe(SIMDVec_f const & b) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool local_mask_ptr[VEC_LEN]; bool retval = true; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_mask_ptr[i] = local_ptr[i] == local_b_ptr[i]; } #pragma omp simd reduction(&&:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval && local_mask_ptr[i]; } return retval; } // CMPES UME_FORCE_INLINE bool cmpe(SCALAR_FLOAT_TYPE b) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool local_mask_ptr[VEC_LEN]; bool retval = true; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_mask_ptr[i] = local_ptr[i] == b; } #pragma omp simd reduction(&&:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval && local_mask_ptr[i]; } return retval; } // UNIQUE // TODO // HADD UME_FORCE_INLINE SCALAR_FLOAT_TYPE hadd() const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE retval = SCALAR_FLOAT_TYPE(0.0f); #pragma omp simd reduction(+:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval + local_ptr[i]; } return retval; } // MHADD UME_FORCE_INLINE SCALAR_FLOAT_TYPE hadd(SIMDVecMask<VEC_LEN> const & mask) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE masked_copy[VEC_LEN]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE retval = SCALAR_FLOAT_TYPE(0.0f); #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i]; else masked_copy[i] = SCALAR_FLOAT_TYPE(0.0f); } #pragma omp simd reduction(+:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval + masked_copy[i]; } return retval; } // HADDS UME_FORCE_INLINE SCALAR_FLOAT_TYPE hadd(SCALAR_FLOAT_TYPE b) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE retval = b; #pragma omp simd reduction(+:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval + 
local_ptr[i]; } return retval; } // MHADDS UME_FORCE_INLINE SCALAR_FLOAT_TYPE hadd(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE masked_copy[VEC_LEN]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE retval = b; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i]; else masked_copy[i] = SCALAR_FLOAT_TYPE(0.0f); } #pragma omp simd reduction(+:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval + masked_copy[i]; } return retval; } // HMUL UME_FORCE_INLINE SCALAR_FLOAT_TYPE hmul() const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE retval = SCALAR_FLOAT_TYPE(1.0f); #pragma omp simd reduction(*:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval * local_ptr[i]; } return retval; } // MHMUL UME_FORCE_INLINE SCALAR_FLOAT_TYPE hmul(SIMDVecMask<VEC_LEN> const & mask) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE masked_copy[VEC_LEN]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE retval = SCALAR_FLOAT_TYPE(1.0f); #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i]; else masked_copy[i] = SCALAR_FLOAT_TYPE(1.0f); } #pragma omp simd reduction(*:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval * masked_copy[i]; } return retval; } // HMULS UME_FORCE_INLINE SCALAR_FLOAT_TYPE hmul(SCALAR_FLOAT_TYPE b) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE retval = b; #pragma omp simd reduction(*:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval * local_ptr[i]; } return retval; } // MHMULS UME_FORCE_INLINE SCALAR_FLOAT_TYPE hmul(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE masked_copy[VEC_LEN]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE retval = b; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i]; else masked_copy[i] = SCALAR_FLOAT_TYPE(1.0f); } #pragma omp simd reduction(*:retval) for(unsigned int i = 0; i < VEC_LEN; i++) { retval = retval * masked_copy[i]; } return retval; } // FMULADDV UME_FORCE_INLINE SIMDVec_f fmuladd(SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] + local_c_ptr[i]; } return retval; } // MFMULADDV UME_FORCE_INLINE SIMDVec_f fmuladd(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] + local_c_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // 
FMULSUBV UME_FORCE_INLINE SIMDVec_f fmulsub(SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] - local_c_ptr[i]; } return retval; } // MFMULSUBV UME_FORCE_INLINE SIMDVec_f fmulsub(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] - local_c_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // FADDMULV UME_FORCE_INLINE SIMDVec_f faddmul(SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = (local_ptr[i] + local_b_ptr[i]) * local_c_ptr[i]; } return retval; } // MFADDMULV UME_FORCE_INLINE SIMDVec_f faddmul(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = (local_ptr[i] + local_b_ptr[i]) * local_c_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // FSUBMULV UME_FORCE_INLINE SIMDVec_f fsubmul(SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = (local_ptr[i] - local_b_ptr[i]) * local_c_ptr[i]; } return retval; } // MFSUBMULV UME_FORCE_INLINE SIMDVec_f fsubmul(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b, SIMDVec_f const & c) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; SCALAR_FLOAT_TYPE const * local_c_ptr = &c.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = (local_ptr[i] - local_b_ptr[i]) * local_c_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // MAXV UME_FORCE_INLINE SIMDVec_f max(SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; 
SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] > local_b_ptr[i]) local_retval_ptr[i] = local_ptr[i]; else local_retval_ptr[i] = local_b_ptr[i]; } return retval; } // MMAXV UME_FORCE_INLINE SIMDVec_f max(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] > local_b_ptr[i]; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_retval_ptr[i] = local_b_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // MAXS UME_FORCE_INLINE SIMDVec_f max(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] > b) local_retval_ptr[i] = local_ptr[i]; else local_retval_ptr[i] = b; } return retval; } // MMAXS UME_FORCE_INLINE SIMDVec_f max(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] > b; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_retval_ptr[i] = b; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // MAXVA UME_FORCE_INLINE SIMDVec_f & maxa(SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] <= local_b_ptr[i]) local_ptr[i] = local_b_ptr[i]; } return *this; } // MMAXVA UME_FORCE_INLINE SIMDVec_f & maxa(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] > local_b_ptr[i]; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_ptr[i] = local_b_ptr[i]; } return *this; } // MAXSA UME_FORCE_INLINE SIMDVec_f & maxa(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] <= b) local_ptr[i] = b; } return *this; } // MMAXSA UME_FORCE_INLINE SIMDVec_f & maxa(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] > b; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_ptr[i] = b; } return *this; } // MINV UME_FORCE_INLINE SIMDVec_f min(SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; 
i++) { if(local_ptr[i] < local_b_ptr[i]) local_retval_ptr[i] = local_ptr[i]; else local_retval_ptr[i] = local_b_ptr[i]; } return retval; } // MMINV UME_FORCE_INLINE SIMDVec_f min(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] < local_b_ptr[i]; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_retval_ptr[i] = local_b_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // MINS UME_FORCE_INLINE SIMDVec_f min(SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] < b) local_retval_ptr[i] = local_ptr[i]; else local_retval_ptr[i] = b; } return retval; } // MMINS UME_FORCE_INLINE SIMDVec_f min(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] < b; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_retval_ptr[i] = b; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // MINVA UME_FORCE_INLINE SIMDVec_f & mina(SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] > local_b_ptr[i]) local_ptr[i] = local_b_ptr[i]; } return *this; } // MMINVA UME_FORCE_INLINE SIMDVec_f & mina(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_f const & b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; SCALAR_FLOAT_TYPE const * local_b_ptr = &b.mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] < local_b_ptr[i]; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_ptr[i] = local_b_ptr[i]; } return *this; } // MINSA UME_FORCE_INLINE SIMDVec_f & mina(SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] > b) local_ptr[i] = b; } return *this; } // MMINSA UME_FORCE_INLINE SIMDVec_f & mina(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE b) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] < b; bool cond = local_mask_ptr[i] && !predicate; if(cond) local_ptr[i] = b; } return *this; } // HMAX // MHMAX // IMAX // MIMAX // HMIN // MHMIN // IMIN // MIMIN // GATHERS UME_FORCE_INLINE SIMDVec_f & gather(SCALAR_FLOAT_TYPE const * baseAddr, SCALAR_UINT_TYPE const * indices) { for(unsigned int i = 0; i < VEC_LEN; i++) { mVec[i] = baseAddr[indices[i]]; } return *this; } // MGATHERS UME_FORCE_INLINE SIMDVec_f & gather(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE const * baseAddr, SCALAR_UINT_TYPE const * indices) { for(unsigned int i = 0; i < VEC_LEN; i++) { 
if(mask.mMask[i] == true) mVec[i] = baseAddr[indices[i]]; } return *this; } // GATHERV UME_FORCE_INLINE SIMDVec_f & gather(SCALAR_FLOAT_TYPE const * baseAddr, SIMDVec_u<SCALAR_UINT_TYPE, VEC_LEN> const & indices) { for(unsigned int i = 0; i < VEC_LEN; i++) { mVec[i] = baseAddr[indices.mVec[i]]; } return *this; } // MGATHERV UME_FORCE_INLINE SIMDVec_f & gather(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE const * baseAddr, SIMDVec_u<SCALAR_UINT_TYPE, VEC_LEN> const & indices) { for(unsigned int i = 0; i < VEC_LEN; i++) { if(mask.mMask[i] == true) mVec[i] = baseAddr[indices.mVec[i]]; } return *this; } // SCATTERS UME_FORCE_INLINE SCALAR_FLOAT_TYPE* scatter(SCALAR_FLOAT_TYPE* baseAddr, SCALAR_UINT_TYPE* indices) const { for(unsigned int i = 0; i < VEC_LEN; i++) { baseAddr[indices[i]] = mVec[i]; } return baseAddr; } // MSCATTERS UME_FORCE_INLINE SCALAR_FLOAT_TYPE* scatter(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE* baseAddr, SCALAR_UINT_TYPE* indices) const { for(unsigned int i = 0; i < VEC_LEN; i++) { if(mask.mMask[i]) baseAddr[indices[i]] = mVec[i]; } return baseAddr; } // SCATTERV UME_FORCE_INLINE SCALAR_FLOAT_TYPE* scatter(SCALAR_FLOAT_TYPE* baseAddr, SIMDVec_u<SCALAR_UINT_TYPE, VEC_LEN> const & indices) const { for(unsigned int i = 0; i < VEC_LEN; i++) { baseAddr[indices.mVec[i]] = mVec[i]; } return baseAddr; } // MSCATTERV UME_FORCE_INLINE SCALAR_FLOAT_TYPE* scatter(SIMDVecMask<VEC_LEN> const & mask, SCALAR_FLOAT_TYPE* baseAddr, SIMDVec_u<SCALAR_UINT_TYPE, VEC_LEN> const & indices) const { for(unsigned int i = 0; i < VEC_LEN; i++) { if(mask.mMask[i]) baseAddr[indices.mVec[i]] = mVec[i]; } return baseAddr; } // NEG UME_FORCE_INLINE SIMDVec_f neg() const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = -local_ptr[i]; } return retval; } UME_FORCE_INLINE SIMDVec_f operator- () const { return neg(); } // MNEG UME_FORCE_INLINE SIMDVec_f neg(SIMDVecMask<VEC_LEN> const & mask) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_retval_ptr[i] = -local_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // NEGA UME_FORCE_INLINE SIMDVec_f & nega() { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_ptr[i] = -local_ptr[i]; } return *this; } // MNEGA UME_FORCE_INLINE SIMDVec_f & nega(SIMDVecMask<VEC_LEN> const & mask) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i] == true) local_ptr[i] = -local_ptr[i]; } return *this; } // ABS UME_FORCE_INLINE SIMDVec_f abs() const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] >= 0 ) local_retval_ptr[i] = local_ptr[i]; else local_retval_ptr[i] = -local_ptr[i]; } return retval; } // MABS UME_FORCE_INLINE SIMDVec_f abs(SIMDVecMask<VEC_LEN> const & mask) const { SIMDVec_f retval; SCALAR_FLOAT_TYPE * local_retval_ptr = &retval.mVec[0]; 
SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] < 0; bool cond = local_mask_ptr[i] && predicate; if(cond) local_retval_ptr[i] = -local_ptr[i]; else local_retval_ptr[i] = local_ptr[i]; } return retval; } // ABSA UME_FORCE_INLINE SIMDVec_f & absa() { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_ptr[i] < 0 ) local_ptr[i] = -local_ptr[i]; } return *this; } // MABSA UME_FORCE_INLINE SIMDVec_f & absa(SIMDVecMask<VEC_LEN> const & mask) { SCALAR_FLOAT_TYPE * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { bool predicate = local_ptr[i] < 0; bool cond = local_mask_ptr[i] && predicate; if(cond) local_ptr[i] = -local_ptr[i]; } return *this; } // CMPEQRV // CMPEQRS // SQR // MSQR // SQRA // MSQRA // SQRT // MSQRT // SQRTA // MSQRTA // POWV // MPOWV // POWS // MPOWS // ROUND // MROUND // TRUNC UME_FORCE_INLINE SIMDVec_i<SCALAR_INT_TYPE, VEC_LEN> trunc() const { SIMDVec_i<SCALAR_INT_TYPE, VEC_LEN> retval; SCALAR_INT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { local_retval_ptr[i] = (SCALAR_INT_TYPE) local_ptr[i]; } return retval; } // MTRUNC UME_FORCE_INLINE SIMDVec_i<SCALAR_INT_TYPE, VEC_LEN> trunc(SIMDVecMask<VEC_LEN> const & mask) const { SIMDVec_i<SCALAR_INT_TYPE, VEC_LEN> retval; SCALAR_INT_TYPE * local_retval_ptr = &retval.mVec[0]; SCALAR_FLOAT_TYPE const * local_ptr = &mVec[0]; bool const * local_mask_ptr = &mask.mMask[0]; #pragma omp simd safelen(VEC_LEN) for(unsigned int i = 0; i < VEC_LEN; i++) { if(local_mask_ptr[i]) local_retval_ptr[i] = (SCALAR_INT_TYPE) local_ptr[i]; else local_retval_ptr[i] = 0; } return retval; } // FLOOR // MFLOOR // CEIL // MCEIL // ISFIN // ISINF // ISAN // ISNAN // ISSUB // ISZERO // ISZEROSUB // SIN // MSIN // COS // MCOS // TAN // MTAN // CTAN // MCTAN // PACK // PACKLO // PCAKHI // UNPACK // UNPACKLO // UNPACKHI // DEGRADE UME_FORCE_INLINE operator SIMDVec_f<SCALAR_FLOAT_LOWER_PRECISION, VEC_LEN>() const; // PROMOTE UME_FORCE_INLINE operator SIMDVec_f<SCALAR_FLOAT_HIGHER_PRECISION, VEC_LEN>() const; // FTOU UME_FORCE_INLINE operator SIMDVec_u<SCALAR_UINT_TYPE, VEC_LEN>() const; // FTOI UME_FORCE_INLINE operator SIMDVec_i<SCALAR_INT_TYPE, VEC_LEN>() const; }; // SIMD NullTypes. These are used whenever a terminating // scalar type is used as a creator function for SIMD type. // These types cannot be instantiated, but are necessary for // typeset to be consistent. 
template<> class SIMDVec_f<NullType<1>, 1> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<1>, 2> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<1>, 4> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<1>, 8> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<1>, 16> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<1>, 32> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<1>, 64> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<1>, 128> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 1> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 2> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 4> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 8> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 16> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 32> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 64> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<2>, 128> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 1> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 2> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 4> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 8> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 16> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 32> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 64> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; template<> class SIMDVec_f<NullType<3>, 128> { public: // private: SIMDVec_f() {} ~SIMDVec_f() {} }; } } #endif
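// ----------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the plugin): how the load,
// masked-add, store and horizontal-add members defined above compose. It
// assumes the UME::SIMD convenience typedefs SIMD4_32f and SIMDMask4; the
// surrounding "#if 0" keeps the sketch out of compilation.
// ----------------------------------------------------------------------------
#if 0
#include "UMESimd.h"

float masked_add_example(float const* a, float const* b, bool const* keep, float* out) {
    UME::SIMD::SIMD4_32f va, vb;
    va.load(a);                                // LOAD: read 4 scalars into the vector
    vb.load(b);
    UME::SIMD::SIMDMask4 m(keep[0], keep[1], keep[2], keep[3]);
    UME::SIMD::SIMD4_32f vc = va.add(m, vb);   // MADDV: a[i]+b[i] where the mask is true, a[i] elsewhere
    vc.store(out);                             // STORE: write 4 scalars back to memory
    return vc.hadd();                          // HADD: horizontal sum of the 4 lanes
}
#endif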
bp.c
#include<stdio.h> #include "gdal.h" #include<omp.h> #define NODATA 28768 void usage() { printf( "-----------------------------------------\n"); printf( "--Modis Processing chain--Serial code----\n"); printf( "-----------------------------------------\n"); printf( "./ndvi inNDVI inET inPET\n"); printf( "\toutNDVI\n"); printf( "\tDOY Tsw\n"); printf( "-----------------------------------------\n"); printf( "inNDVI\t\tModis MOD13Q1 NDVI 250m\n"); printf( "inET\t\tModis MOD16A2 ET 500m\n"); printf( "inPET\t\tModis MOD16A2 PET 500m\n"); printf( "outNDVI\tQA corrected NDVI output [-]\n"); printf( "DOY\tDay of Year\n"); printf( "Tsw\tTransmissivity single-way [-]\n"); return; } double biomass(double fpar, double solar_day, double evap_fr, double light_use_ef); double solar_day(double lat, double doy, double tsw); int main( int argc, char *argv[] ) { if( argc < 6 ) { usage(); return 1; } char *inB3 = argv[1]; //NDVI_QA 250m char *inB4 = argv[2]; // ET 500m char *inB5 = argv[3]; // PET 500m char *bpF = argv[4]; int doy = atoi(argv[5]); // DOY for solar day float tsw = atof(argv[6]); // TSW for solar day GDALAllRegister(); GDALDatasetH hD3 = GDALOpen(inB3,GA_ReadOnly);//NDVI 250m GDALDatasetH hD4 = GDALOpen(inB4,GA_ReadOnly);//ET 500m GDALDatasetH hD5 = GDALOpen(inB5,GA_ReadOnly);//PET 500m if(hD3==NULL||hD4==NULL||hD5==NULL){ printf("One or more input files "); printf("could not be loaded\n"); exit(1); } //LOAD 250 m bands and create output GDALDriverH hDr3 = GDALGetDatasetDriver(hD3); char **options = NULL; options = CSLSetNameValue( options, "TILED", "YES" ); options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" ); options = CSLSetNameValue( options, "PREDICTOR", "2" ); GDALDatasetH hDOut = GDALCreateCopy(hDr3,bpF,hD3,FALSE,options,NULL,NULL); GDALRasterBandH hBOut = GDALGetRasterBand(hDOut,1); GDALRasterBandH hB3 = GDALGetRasterBand(hD3,1);//NDVI 250m GDALRasterBandH hB4 = GDALGetRasterBand(hD4,1);//ET 500m GDALRasterBandH hB5 = GDALGetRasterBand(hD5,1);//PET 500m double geomx[6]={0.0}; if(GDALGetGeoTransform(hD3,geomx)==CE_None){ /* Do Nothing */ //printf( "Origin (ULx,ULy) = (%.6f,%.6f)\n", geomx[0], geomx[3] ); //printf( "Pixel Size = (%.6f,%.6f)\n", geomx[1], geomx[5] ); //printf( "Rot0 = (%.6f,%.6f)\n", geomx[2], geomx[4] ); } else { printf("ERROR: Projection acquisition problem from Band1\n"); exit(1); } int nX = GDALGetRasterBandXSize(hB3);//From NDVI@250m int nY = GDALGetRasterBandYSize(hB3);//From NDVI@250m int16_t *l3 = (int16_t *) malloc(sizeof(int16_t)*nX); int16_t *l4 = (int16_t *) malloc(sizeof(int16_t)*nX/2); int16_t *l5 = (int16_t *) malloc(sizeof(int16_t)*nX/2); unsigned int *lOut = (unsigned int *) malloc(sizeof(unsigned int) *nX); int row,col; int minimum=100000.0; int maximum = 0.0; for(row=0;row<nY;row++){ GDALRasterIO(hB3,GF_Read,0,row,nX,1,l3,nX,1,GDT_Int16,0,0); GDALRasterIO(hB4,GF_Read,0,row/2,nX/2,1,l4,nX/2,1,GDT_Int16,0,0); GDALRasterIO(hB5,GF_Read,0,row/2,nX/2,1,l5,nX/2,1,GDT_Int16,0,0); #pragma omp parallel for default(none) \ private (col) \ shared (row,geomx,doy,tsw,nX,nY,l3,l4,l5,lOut,minimum,maximum) for(col=0;col<nX;col++){ if(l4[col/2]<32000||l5[col/2]<32000){ double lat=geomx[3]+geomx[4]*col+geomx[5]*row; double solar = solar_day(lat, doy, tsw ); if(solar<0.0) solar=0.0; double evapfr = l4[col/2]/(1.0*l5[col/2]); if(evapfr<0.0) evapfr=0.0; double fpar=0.0; fpar = 1.257*(l3[col]/10000.0)-0.161; if(fpar<0.0) fpar=0.0; lOut[col]=(unsigned int) 10000.0*biomass(fpar,solar,evapfr,1.0); if(lOut[col]<minimum)minimum=lOut[col]; 
if(lOut[col]>maximum)maximum=lOut[col]; }else{ lOut[col] = NODATA; } } #pragma omp barrier GDALRasterIO(hBOut,GF_Write,0,row,nX,1,lOut,nX,1,GDT_UInt32,0,0); } printf("min/max: %f %f [kg/ha/day]\n",minimum/10000.0,maximum/10000.0); GDALSetRasterNoDataValue(hBOut,NODATA); GDALSetRasterUnitType(hBOut,"Biomass [kg/ha/day]"); GDALSetDescription(hBOut,"250m 8 Days mean Biomass"); GDALSetMetadataItem(hDOut,"*","",NULL); if(l3 != NULL) free(l3); if(l4 != NULL) free(l4); if(l5 != NULL) free(l5); GDALClose(hD3); GDALClose(hD4); GDALClose(hD5); GDALClose(hDOut); return(EXIT_SUCCESS); }
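/*
 * Example invocation (the file names and parameter values are hypothetical,
 * shown only to illustrate the argument order expected by usage()):
 *
 *   ./ndvi MOD13Q1_NDVI_250m.tif MOD16A2_ET_500m.tif MOD16A2_PET_500m.tif \
 *          biomass_250m.tif 161 0.7
 *
 * where 161 is the day of year and 0.7 a plausible single-way atmospheric
 * transmissivity.
 */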
emd.h
// fix inttypes for GCC #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <cinttypes> // fix for the fix - it conflicts with numpy #undef __STDC_FORMAT_MACROS #include <cstdint> #include <cmath> #include <algorithm> #include "cache.h" #include "graph/min_cost_flow.h" /*! @mainpage libwmdrelax * * @section s0 Description * This library allows to efficinetly solve the Earth Mover's Distance * problem (http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/RUBNER/emd.htm). * It also solves the relaxed approximation suitable for calculating the * Word Mover's Distance (http://www.cs.cornell.edu/~kilian/papers/wmd_metric.pdf), * hence the name. * * Project: https://github.com/src-d/wmd-relax * * README: @ref ignore_this_doxygen_anchor * * @section s1 C/C++ API * - emd() solves the original Earth Mover's Distance problem. * - emd_relaxed() solves the relaxed problem - one of the two sums is replaced * with the maximum element. * - EMDCache and EMDRelaxedCache are the caches to prevent from dynamic memory * allocation. * * Although C/C++ API is complete and totally usable as-is, python.cc provides * the Python 3 API. * * @section s2 Python 3 API * * - emd_relaxed() * - emd_relaxed_cache_init() creates the cache object for emd_relaxed() * - emd_relaxed_cache_fini() destroys the cache object for emd_relaxed() * - emd() * - emd_cache_init() creates the cache object for emd() * - emd_cache_fini() destroys the cache object for emd() * * @section s3 Building * * Normally, the library is built with setup.py as a part of the python package. * Besides, it can be built with cmake. In the latter case, ensure that you've * cloned or-tools submodule: * @code{.unparsed} * git submodule update --init * @endcode */ namespace { const int64_t MASS_MULT = 1000 * 1000 * 1000; // weights quantization constant const int64_t COST_MULT = 1000 * 1000; // costs quantization constant /// The cache for emd(). class EMDCache : public wmd::Cache { public: bool* side() const noexcept { return side_.get(); } int64_t* demand() const noexcept { return demand_.get(); } int64_t* cost() const noexcept { return cost_.get(); } size_t get_size() const noexcept { return size_; } operations_research::SimpleMinCostFlow& min_cost_flow() const noexcept { return min_cost_flow_; } protected: void _allocate() override { side_.reset(new bool[size_]); demand_.reset(new int64_t[size_]); cost_.reset(new int64_t[size_ * size_]); // warmup min_cost_flow_ for (size_t i = 0; i < size_; i++) { for (size_t j = 0; j < size_; j++) { min_cost_flow_.AddArcWithCapacityAndUnitCost(i, j, 1, 1); } } for (size_t i = 0; i < size_; i++) { min_cost_flow_.SetNodeSupply(i, 1); } min_cost_flow_.Reset(); } void _reset() noexcept override { side_.reset(); demand_.reset(); cost_.reset(); min_cost_flow_.Reset(); } private: mutable std::unique_ptr<bool[]> side_; mutable std::unique_ptr<int64_t[]> demand_; mutable std::unique_ptr<int64_t[]> cost_; mutable operations_research::SimpleMinCostFlow min_cost_flow_; mutable std::mutex lock_; }; /// Used by emd() to convert the problem to min cost flow. template <typename T> void convert_weights(const T*__restrict__ in, bool sign, int64_t*__restrict__ out, size_t size) { assert(in && out); assert(size > 0); int64_t sum = 0; double old_s = 0, new_s = 0; double mult = (sign ? 
-1 : 1); /* note: the original "#pragma omp simd" was dropped here: old_s/new_s carry a value across iterations (a loop-carried dependency), so the independence that simd asserts does not hold */ for (size_t i = 0; i < size; i++) { old_s = new_s; new_s = old_s + in[i]; int64_t w = round(new_s * MASS_MULT) - round(old_s * MASS_MULT); sum += w; out[i] += w * mult; } if (sum != MASS_MULT) { if (fabs((double)(sum - MASS_MULT)) / MASS_MULT > 0.000001) { #ifndef NDEBUG assert(sum == MASS_MULT && "Masses on one side not sufficiently normalized."); #else fprintf(stderr, "wmd: weights are not normalized: %" PRId64 " != %" PRId64 "\n", sum, MASS_MULT); #endif } else { // compensate for the rounding error out[0] += (sign ? 1 : -1) * (sum - MASS_MULT); } } } /// Used by emd() to convert the problem to min cost flow. template <typename T> void convert_costs(const T*__restrict__ in, const bool*__restrict__ side, int64_t*__restrict__ out, size_t size) { #pragma omp simd for (size_t i = 0; i < size; i++) { for (size_t j = 0; j < size; j++) { out[i * size + j] = round(in[i * size + j] * COST_MULT); } } #pragma omp simd for (size_t i = 0; i < size; i++) { for (size_t j = 0; j < size; j++) { if (side[i] && !side[j]) { out[i * size + j] = -out[j * size + i]; } } } } } // namespace /// Solves the exact EMD problem. Internally, it converts the conditions to /// a min cost flow statement and calls operations_research::SimpleMinCostFlow. /// @param w1 The first array with weights of length `size`. /// @param w2 The second array with weights of length `size`. /// @param dist The costs matrix of shape `size` x `size`. /// @param size The dimensionality of the problem. /// @param cache The cache to use. It should be initialized with at least `size` /// elements. /// @author Wojciech Jabłoński <wj359634@students.mimuw.edu.pl> template <typename T> T emd(const T*__restrict__ w1, const T*__restrict__ w2, const T*__restrict__ dist, uint32_t size, const EMDCache& cache) { assert(w1 && w2 && dist); assert(size > 0); std::lock_guard<std::mutex> _(cache.enter(size)); bool* side = cache.side(); int64_t* demand = cache.demand(); int64_t* cost = cache.cost(); memset(demand, 0, size * sizeof(demand[0])); convert_weights(w1, false, demand, size); convert_weights(w2, true, demand, size); #pragma omp simd for (size_t i = 0; i < size; i++) { side[i] = (demand[i] < 0); } convert_costs(dist, side, cost, size); auto& min_cost_flow = cache.min_cost_flow(); for (size_t i = 0; i < size; i++) { for (size_t j = 0; j < size; j++) { if (!side[i] && side[j]) { min_cost_flow.AddArcWithCapacityAndUnitCost( i, j, std::min(demand[i], -demand[j]), cost[i * size + j]); } } } for (size_t i = 0; i < size; i++) { min_cost_flow.SetNodeSupply(i, demand[i]); } auto status = min_cost_flow.Solve(); double result = min_cost_flow.OptimalCost(); min_cost_flow.Reset(); #ifndef NDEBUG assert(status == operations_research::SimpleMinCostFlow::OPTIMAL); #else if (status != operations_research::SimpleMinCostFlow::OPTIMAL) { fprintf(stderr, "wmd: status is %d\n", status); return -status; } #endif return T((result / MASS_MULT) / COST_MULT); }
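// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header). The
// EMDCache initialization below is an assumption: wmd::Cache's public
// interface lives in cache.h, which is not shown here, so allocate() is a
// hypothetical stand-in for whatever sizing call that class exposes. The
// point is the calling convention of emd(): two weight vectors that each sum
// to 1, a row-major size x size cost matrix, and a cache sized for at least
// `size` elements.
//
//   EMDCache cache;
//   cache.allocate(4);                        // hypothetical sizing call
//   const float w1[4] = {0.5f, 0.5f, 0.0f, 0.0f};
//   const float w2[4] = {0.0f, 0.0f, 0.5f, 0.5f};
//   const float dist[16] = {0, 1, 2, 3,
//                           1, 0, 1, 2,
//                           2, 1, 0, 1,
//                           3, 2, 1, 0};
//   float d = emd(w1, w2, dist, 4, cache);    // exact EMD under `dist`;
//                                             // here d should come out to 2.0
// ----------------------------------------------------------------------------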
GB_binop__lor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_01__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_02__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_03__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int8) // A*D function (colscale): GB (_AxD__lor_int8) // D*A function (rowscale): GB (_DxB__lor_int8) // C+=B function (dense accum): GB (_Cdense_accumB__lor_int8) // C+=b function (dense accum): GB (_Cdense_accumb__lor_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int8) // C=scalar+B GB (_bind1st__lor_int8) // C=scalar+B' GB (_bind1st_tran__lor_int8) // C=A+scalar GB (_bind2nd__lor_int8) // C=A'+scalar GB (_bind2nd_tran__lor_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT8 || GxB_NO_LOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lor_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lor_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
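//------------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of this auto-generated file):
// a user-level call that can dispatch to the kernels above. GxB_LOR_INT8 is
// the built-in operator named by GB_DISABLE; C, A, and B are assumed to be
// already-created GrB_INT8 matrices of matching dimensions.
//
//      // C = A+B with z = ((x != 0) || (y != 0)); when enabled, the sparse
//      // eWiseAdd path is handled by GB (_AaddB__lor_int8):
//      GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GxB_LOR_INT8, A, B, NULL) ;
//------------------------------------------------------------------------------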
tsp_rnd05.c
/* Description: This program executes my "Random Swapping" algorithm to solve the "Travelling Salesman Problem" Author: Georgios Evangelou (1046900) Year: 5 Parallel Programming in Machine Learning Problems Electrical and Computer Engineering Department, University of Patras System Specifications: CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips) GPU: Nvidia GTX 1050 (dual-fan, overclocked) RAM: 8GB (dual-channel, @2666 MHz) Version Notes: Compiles/Runs/Debugs with: gcc tsp_rnd05.c -o tsp_rnd05 -lm -fopt-info -pg -fopenmp -O3 && time ./tsp_rnd05 && gprof ./tsp_rnd05 Executes the algorithm for 10,000 cities spanning an area of 1,000 x 1,000 km and produces correct results Inherits all settings of the previous version unless stated otherwise Because a call to rand() locks the shared generator state and blocks every other thread from calling it concurrently, each thread is given a pseudo-random list of numbers beforehand (<RandomSequences> array); a reentrant alternative such as rand_r() is sketched after main() below Initializing the <RandomSequences> array takes a lot of time. Thus, a good speed improvement in the next version could be achieved if one of the threads became responsible for refilling the array while the others perform the swaps Best time achieved: ~ 58 seconds to find a path below 500000 total distance with all optimizations listed below and 1 thread ~ 27 seconds to find a path below 500000 total distance with all optimizations listed below and 12 threads */ // **************************************************************************************************************** #pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations #pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system #pragma GCC target("avx") //Enable AVX // **************************************************************************************************************** #include "stdio.h" #include "stdlib.h" #include "math.h" #include "omp.h" // **************************************************************************************************************** #define N 10000 #define Nx 1000 #define Ny 1000 #define VACANT_POSITION_CODE -999999 #define THREADS 12 #define STEPS_PER_THREAD_PER_REPETITION 20000000 #define DEFAULT_MAX_REPETITIONS 100000 #define THRESHOLD 500000 #define RANDOM_NUMBERS_PER_THREAD (2*STEPS_PER_THREAD_PER_REPETITION) #define STEPS_PER_THREAD_PER_REPETITION_2 (2*STEPS_PER_THREAD_PER_REPETITION) // **************************************************************************************************************** float CitiesX[N]; float CitiesY[N]; int Path[N+1]; omp_lock_t Locks[N+1]; int RandomSequences[THREADS][RANDOM_NUMBERS_PER_THREAD]; // **************************************************************************************************************** // Initializes the cities' positions // **************************************************************************************************************** void SetCities() { printf("Now initializing the positions of the cities...\n"); for (int i=0; i<N; i++) { CitiesX[i] = Nx * (float) rand() / RAND_MAX; CitiesY[i] = Ny * (float) rand() / RAND_MAX; } } // **************************************************************************************************************** // Initializes the traveling path // **************************************************************************************************************** void ResetPath() { printf("Now initializing the path...\n"); for (int i=0; i<N+1; i++) Path[i] = -1; } //
**************************************************************************************************************** // Checks if a city is already in the path // **************************************************************************************************************** int IsInPath(int k) { for (int i=0; i<N; i++) if (Path[i] == k) return 1; return 0; } // **************************************************************************************************************** // Creates a random path // **************************************************************************************************************** void RandomizePath() { int k; printf("Now randomizing the path...\n"); Path[0] = rand() % N; /* rand()%N avoids the int overflow of N*rand() and keeps the index inside 0..N-1 */ Path[N] = Path[0]; for (int i=1; i<N; i++) { do { k = rand() % N; /* same fix: the old ((float)N*rand())/RAND_MAX could yield k==N */ } while (IsInPath(k) == 1); Path[i] = k; } } // **************************************************************************************************************** // Prints the cities' positions // **************************************************************************************************************** void PrintCities() { printf("> The cities are:\n"); for (int i=0; i<N; i++) { printf(">> City: %6d X:%5.2f Y:%5.2f\n", i, CitiesX[i], CitiesY[i] ); } printf("\n"); } // **************************************************************************************************************** // Visually maps the cities' positions // **************************************************************************************************************** void MapCities() { int Map[Ny+1][Nx+1]; printf("Now creating a visual map of the cities...\n"); for (int i=0; i<Nx+1; i++) for (int j=0; j<Ny+1; j++) Map[j][i] = VACANT_POSITION_CODE; //printf("Quantized coordinates are:\n"); for (int c=0; c<N; c++) { int x = (int) CitiesX[c] ; int y = (int) CitiesY[c] ; //printf(" City:%d y=%d and x=%d\n",c,y,x); if (Map[y][x] == VACANT_POSITION_CODE) Map[y][x] = c+1; else Map[y][x] = -1; } printf("This is the cities' map:\n"); printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); for (int y=0; y<Ny+1; y++){ for (int x=0; x<Nx+1; x++) printf("%8d ", Map[y][x]); printf("\n"); } printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); printf("\n"); } // **************************************************************************************************************** // Finds Euclidean Distance between two cities // **************************************************************************************************************** double Distance(int A, int B) { double result = sqrt( (CitiesX[A]-CitiesX[B])*(CitiesX[A]-CitiesX[B]) + (CitiesY[A]-CitiesY[B])*(CitiesY[A]-CitiesY[B]) ); return result; } // **************************************************************************************************************** // Finds Euclidean Distance in current path // **************************************************************************************************************** double PathDistance() { double totDist = 0.0; for (int i=0; i<N; i++) { totDist += Distance(Path[i], Path[i+1]); } totDist += Distance(Path[N], Path[0]); /* always zero, since Path[N]==Path[0]; kept from the original */ return totDist; } // **************************************************************************************************************** // Swaps cities if swapping results in shorter Distance //
**************************************************************************************************************** double SwapCities(double totDist) { double totDistChange = 0.0; #pragma omp parallel reduction(+:totDistChange) num_threads(THREADS) { int thisThreadsID = omp_get_thread_num(); int r = 0; do { int A = RandomSequences[thisThreadsID][r++]; int B = RandomSequences[thisThreadsID][r++]; //if (r == RANDOM_NUMBERS_PER_THREAD+1) {printf("EXCEEDED STORED RANDOM NUMBERS"); exit(100);} if (A>B) { int temp = A; A = B; B = temp; } //always: A<B int flag = B-A-1; //always:flag=0 when A+1==B omp_set_lock(&Locks[A]); omp_set_lock(&Locks[B]); /* locks are always taken in ascending index order (A<B), which prevents deadlock; the neighbours Path[A-1], Path[A+1], Path[B-1], Path[B+1] are read without locks, so concurrent swaps can make distChange an estimate - hence "ESTIMATED PATH_LENGTH" in main() */ double dist1_old = Distance(Path[A-1], Path[A]); //is always needed double dist2_old = (!flag) ? 0 : Distance(Path[A], Path[A+1]); //dist omitted when A,B consecutive double dist3_old = (!flag) ? 0 : Distance(Path[B-1], Path[B]); //dist omitted when A,B consecutive double dist4_old = Distance(Path[B], Path[B+1]); //is always needed double dist1_new = Distance(Path[A-1], Path[B]); //is always needed double dist2_new = (!flag) ? 0 : Distance(Path[B], Path[A+1]); //dist omitted when A,B consecutive double dist3_new = (!flag) ? 0 : Distance(Path[B-1], Path[A]); //dist omitted when A,B consecutive double dist4_new = Distance(Path[A], Path[B+1]); //is always needed double distChange = - dist1_old - dist2_old - dist3_old - dist4_old + dist1_new + dist2_new + dist3_new + dist4_new; if (distChange < 0) { //Must be <0 if it decreases the total Distance int temp = Path[A]; Path[A] = Path[B]; Path[B] = temp; } else distChange=0; omp_unset_lock(&Locks[A]); omp_unset_lock(&Locks[B]); totDistChange += distChange; } while (r < STEPS_PER_THREAD_PER_REPETITION_2) ; } return totDist + totDistChange; } // **************************************************************************************************************** // Checks if current program parameters lead to feasible spatial states // **************************************************************************************************************** int ValidateParameters() { if (Nx*Ny<N) return 0; return 1; } // **************************************************************************************************************** // Initializes the locks // **************************************************************************************************************** void InitializeLocks() { for (int i=0; i<N+1; i++) omp_init_lock(&Locks[i]); } // **************************************************************************************************************** // Initializes the random sequences array ensuring adjacent numbers are never the same // **************************************************************************************************************** void InitializeRandomSequencesArray(int rep) { int previous = -1, current; for (int i=0; i<THREADS; i++) { //srand(i*rep*rand()); for (int j=0; j<RANDOM_NUMBERS_PER_THREAD; j++) { do{ current = 1 + (int)( ((float) rand())*(N-2)/((float)RAND_MAX) ); } while(current==previous); RandomSequences[i][j] = current; previous = current; } } } // **************************************************************************************************************** // The main program // **************************************************************************************************************** int main( int argc, const char* argv[] ) { printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); printf("This program searches for the optimal traveling Distance
between %d cities,\n", N); printf("spanning an area of X=(0,%d) and Y=(0,%d)\n", Nx, Ny); printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); if (ValidateParameters() == 0) { printf("\nERROR: NOT ENOUGH SPACE ALLOCATED FOR GIVEN NUMBER OF CITIES\nThe program will now exit.\n"); return 1; } int repetitions = 0, MaxRepetitions = DEFAULT_MAX_REPETITIONS; omp_set_dynamic(0); if (argc>1) MaxRepetitions = atoi(argv[1]); printf("Maximum number of repetitions set at: %d\n", MaxRepetitions); printf("Maximum number of steps per thread overall set at: %lld\n", (long long)MaxRepetitions*STEPS_PER_THREAD_PER_REPETITION); /* long long: this product overflows a 32-bit int */ SetCities(); ResetPath(); RandomizePath(); InitializeLocks(); double prevDist, totDist = PathDistance(); /* prevDist kept from earlier versions; currently unused */ printf("Now running the main algorithm...\n"); do { prevDist = totDist; repetitions ++; InitializeRandomSequencesArray(repetitions); //srand(repetitions*23); if (repetitions%1==0) /* modulus of 1 prints every repetition; raise it to thin the output */ printf(">>REPETITION:%8d >>BATCH:%10lld >>ESTIMATED PATH_LENGTH: %.1lf\n", repetitions, (long long)repetitions*STEPS_PER_THREAD_PER_REPETITION*THREADS, totDist); totDist = SwapCities(totDist); } while ((repetitions<MaxRepetitions) && (totDist>THRESHOLD)); printf("\nCalculations completed. Results:\n"); printf("Main-routine Repetitions: %d\n", repetitions); printf(" Sub-routine Repetitions: %lld\n", (long long)repetitions*STEPS_PER_THREAD_PER_REPETITION*THREADS); //printf("Estimation of the optimal path length: %.2lf\n", totDist); printf("Actual final path length: %.2lf\n", PathDistance()); /* the result of the random search, not a proven optimum */ return 0 ; }
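// ****************************************************************************************************************
// Sketch of a lock-free alternative to the precomputed <RandomSequences> table (see the Version Notes above).
// POSIX rand_r() keeps its state in a caller-owned seed, so each thread can draw numbers without ever blocking
// the others. A minimal sketch under that assumption -- not used by this version, hence the #if 0 guard:
// ****************************************************************************************************************
#if 0
void SwapCitiesWithRandR(void) {
    #pragma omp parallel num_threads(THREADS)
    {
        unsigned int seed = 12345u + 7919u * (unsigned int) omp_get_thread_num();  /* per-thread PRNG state */
        for (long step = 0; step < STEPS_PER_THREAD_PER_REPETITION; step++) {
            int A = 1 + rand_r(&seed) % (N-1);   /* index in 1..N-1, like the table entries */
            int B = 1 + rand_r(&seed) % (N-1);
            if (A == B) continue;                /* the table version guarantees A != B instead */
            /* ... lock Locks[min(A,B)] then Locks[max(A,B)], evaluate and apply the swap,
               unlock, exactly as in SwapCities() ... */
        }
    }
}
#endif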
project.c
//----------------------------------------------------------------------------- // project.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/19/14 (Build 5.1.000) // 04/14/14 (Build 5.1.004) // 09/15/14 (Build 5.1.007) // 03/19/15 (Build 5.1.008) // 04/30/15 (Build 5.1.009) // 08/01/16 (Build 5.1.011) // 03/14/17 (Build 5.1.012) // Author: L. Rossman // // Project management functions. // // This module provides project-related services such as: // o opening a new project and reading its input data // o allocating and freeing memory for project objects // o setting default values for object properties and options // o initializing the internal state of all objects // o managing hash tables for identifying objects by ID name // // Build 5.1.004: // - Ignore RDII option added. // // Build 5.1.007: // - Default monthly adjustments for climate variables included. // - User-supplied GW flow equations initialized to NULL. // - Storage node exfiltration object initialized to NULL. // - Freeing of memory used for storage node exfiltration included. // // Build 5.1.008: // - Constants used for dynamic wave routing moved to dynwave.c. // - Input processing of minimum time step & number of // parallel threads for dynamic wave routing added. // - Default values of hyd. conductivity adjustments added. // - Freeing of memory used for outfall pollutant load added. // // Build 5.1.009: // - Fixed bug in computing total duration introduced in 5.1.008. // // Build 5.1.011: // - Memory management of hydraulic event dates array added. // // Build 5.1.012: // - Minimum conduit slope option initialized to 0 (none). // - NO/YES no longer accepted as options for NORMAL_FLOW_LIMITED. // //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include <stdlib.h> #include <string.h> #include <malloc.h> #include <math.h> //(5.1.008) #include <omp.h> //(5.1.008) #include "headers.h" #include "lid.h" #include "hash.h" #include "mempool.h" //----------------------------------------------------------------------------- // Constants //----------------------------------------------------------------------------- //// Constants for DYNWAVE flow routing moved to dynwave.c.
//// //(5.1.008) //----------------------------------------------------------------------------- // Shared variables //----------------------------------------------------------------------------- static HTtable* Htable[MAX_OBJ_TYPES]; // Hash tables for object ID names static char MemPoolAllocated; // TRUE if memory pool allocated //----------------------------------------------------------------------------- // External Functions (declared in funcs.h) //----------------------------------------------------------------------------- // project_open (called from swmm_open in swmm5.c) // project_close (called from swmm_close in swmm5.c) // project_readInput (called from swmm_open in swmm5.c) // project_readOption (called from readOption in input.c) // project_validate (called from swmm_open in swmm5.c) // project_init (called from swmm_start in swmm5.c) // project_addObject (called from addObject in input.c) // project_createMatrix (called from openFileForInput in iface.c) // project_freeMatrix (called from iface_closeRoutingFiles) // project_findObject // project_findID //----------------------------------------------------------------------------- // Function declarations //----------------------------------------------------------------------------- static void initPointers(void); static void setDefaults(void); static void openFiles(char *f1, char *f2, char *f3); static void createObjects(void); static void deleteObjects(void); static void createHashTables(void); static void deleteHashTables(void); //============================================================================= void project_open(char *f1, char *f2, char *f3) // // Input: f1 = pointer to name of input file // f2 = pointer to name of report file // f3 = pointer to name of binary output file // Output: none // Purpose: opens a new SWMM project. // { initPointers(); setDefaults(); openFiles(f1, f2, f3); } //============================================================================= void project_readInput() // // Input: none // Output: none // Purpose: retrieves project data from input file. // { // --- create hash tables for fast retrieval of objects by ID names createHashTables(); // --- count number of objects in input file and create them input_countObjects(); createObjects(); // --- read project data from input file input_readData(); if ( ErrorCode ) return; // --- establish starting & ending date/time StartDateTime = StartDate + StartTime; EndDateTime = EndDate + EndTime; ReportStart = ReportStartDate + ReportStartTime; ReportStart = MAX(ReportStart, StartDateTime); // --- check for valid starting & ending date/times if ( EndDateTime <= StartDateTime ) { report_writeErrorMsg(ERR_START_DATE, ""); } else if ( EndDateTime <= ReportStart ) { report_writeErrorMsg(ERR_REPORT_DATE, ""); } else { //// Following code segment was modified for release 5.1.009. //// //(5.1.009) //// // --- compute total duration of simulation in seconds TotalDuration = floor((EndDateTime - StartDateTime) * SECperDAY); // --- reporting step must be <= total duration if ( (double)ReportStep > TotalDuration ) { ReportStep = (int)(TotalDuration); } // --- reporting step can't be < routing step if ( (double)ReportStep < RouteStep ) { report_writeErrorMsg(ERR_REPORT_STEP, ""); } // --- convert total duration to milliseconds TotalDuration *= 1000.0; } //// } //============================================================================= void project_validate() // // Input: none // Output: none // Purpose: checks validity of project data. 
// { int i; int j; int err; // --- validate Curves and TimeSeries for ( i=0; i<Nobjects[CURVE]; i++ ) { err = table_validate(&Curve[i]); if ( err ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); } for ( i=0; i<Nobjects[TSERIES]; i++ ) { err = table_validate(&Tseries[i]); if ( err ) report_writeTseriesErrorMsg(err, &Tseries[i]); } // --- validate hydrology objects // (NOTE: order is important !!!!) climate_validate(); lid_validate(); if ( Nobjects[SNOWMELT] == 0 ) IgnoreSnowmelt = TRUE; if ( Nobjects[AQUIFER] == 0 ) IgnoreGwater = TRUE; for ( i=0; i<Nobjects[GAGE]; i++ ) gage_validate(i); for ( i=0; i<Nobjects[AQUIFER]; i++ ) gwater_validateAquifer(i); for ( i=0; i<Nobjects[SUBCATCH]; i++ ) subcatch_validate(i); for ( i=0; i<Nobjects[SNOWMELT]; i++ ) snow_validateSnowmelt(i); // --- compute geometry tables for each shape curve j = 0; for ( i=0; i<Nobjects[CURVE]; i++ ) { if ( Curve[i].curveType == SHAPE_CURVE ) { Curve[i].refersTo = j; Shape[j].curve = i; if ( !shape_validate(&Shape[j], &Curve[i]) ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); j++; } } // --- validate links before nodes, since the latter can // result in adjustment of node depths for ( i=0; i<Nobjects[NODE]; i++) Node[i].oldDepth = Node[i].fullDepth; for ( i=0; i<Nobjects[LINK]; i++) link_validate(i); for ( i=0; i<Nobjects[NODE]; i++) node_validate(i); // --- adjust time steps if necessary if ( DryStep < WetStep ) { report_writeWarningMsg(WARN06, ""); DryStep = WetStep; } if ( RouteStep > (double)WetStep ) { report_writeWarningMsg(WARN07, ""); RouteStep = WetStep; } // --- adjust individual reporting flags to match global reporting flag if ( RptFlags.subcatchments == ALL ) for (i=0; i<Nobjects[SUBCATCH]; i++) Subcatch[i].rptFlag = TRUE; if ( RptFlags.nodes == ALL ) for (i=0; i<Nobjects[NODE]; i++) Node[i].rptFlag = TRUE; if ( RptFlags.links == ALL ) for (i=0; i<Nobjects[LINK]; i++) Link[i].rptFlag = TRUE; // --- validate dynamic wave options if ( RouteModel == DW ) dynwave_validate(); //(5.1.008) #pragma omp parallel //(5.1.008) { if ( NumThreads == 0 ) NumThreads = omp_get_num_threads(); //(5.1.008) else NumThreads = MIN(NumThreads, omp_get_num_threads()); //(5.1.008) } if ( Nobjects[LINK] < 4 * NumThreads ) NumThreads = 1; //(5.1.008) } //============================================================================= void project_close() // // Input: none // Output: none // Purpose: closes a SWMM project. // { deleteObjects(); deleteHashTables(); } //============================================================================= int project_init(void) // // Input: none // Output: returns an error code // Purpose: initializes the internal state of all objects. 
// { int j; climate_initState(); lid_initState(); for (j=0; j<Nobjects[TSERIES]; j++) table_tseriesInit(&Tseries[j]); for (j=0; j<Nobjects[GAGE]; j++) gage_initState(j); for (j=0; j<Nobjects[SUBCATCH]; j++) subcatch_initState(j); for (j=0; j<Nobjects[NODE]; j++) node_initState(j); for (j=0; j<Nobjects[LINK]; j++) link_initState(j); return ErrorCode; } //============================================================================= int project_addObject(int type, char *id, int n) // // Input: type = object type // id = object ID string // n = object index // Output: returns 0 if object already added, 1 if not, -1 if hashing fails // Purpose: adds an object ID to a hash table // { int result; int len; char *newID; // --- do nothing if object already placed in hash table if ( project_findObject(type, id) >= 0 ) return 0; // --- use memory from the hash tables' common memory pool to store // a copy of the object's ID string len = strlen(id) + 1; newID = (char *) Alloc(len*sizeof(char)); strcpy(newID, id); // --- insert object's ID into the hash table for that type of object result = HTinsert(Htable[type], newID, n); if ( result == 0 ) result = -1; return result; } //============================================================================= int project_findObject(int type, char *id) // // Input: type = object type // id = object ID // Output: returns index of object with given ID, or -1 if ID not found // Purpose: uses hash table to find index of an object with a given ID. // { return HTfind(Htable[type], id); } //============================================================================= char *project_findID(int type, char *id) // // Input: type = object type // id = ID name being sought // Output: returns pointer to location where object's ID string is stored // Purpose: uses hash table to find address of given string entry. // { return HTfindKey(Htable[type], id); } //============================================================================= double ** project_createMatrix(int nrows, int ncols) // // Input: nrows = number of rows (0-based) // ncols = number of columns (0-based) // Output: returns a pointer to a matrix // Purpose: allocates memory for a matrix of doubles. // { int i,j; double **a; // --- allocate pointers to rows a = (double **) malloc(nrows * sizeof(double *)); if ( !a ) return NULL; // --- allocate rows and set pointers to them a[0] = (double *) malloc (nrows * ncols * sizeof(double)); if ( !a[0] ) { free(a); return NULL; } /* free the row-pointer array too, so a failed allocation doesn't leak */ for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols; for ( i = 0; i < nrows; i++) { for ( j = 0; j < ncols; j++) a[i][j] = 0.0; } // --- return pointer to array of pointers to rows return a; } //============================================================================= void project_freeMatrix(double **a) // // Input: a = matrix of doubles // Output: none // Purpose: frees memory allocated for a matrix of doubles. // { if ( a != NULL ) { if ( a[0] != NULL ) free( a[0] ); free( a ); } } //============================================================================= int project_readOption(char* s1, char* s2) // // Input: s1 = option keyword // s2 = string representation of option's value // Output: returns error code // Purpose: reads a project option from a pair of string tokens. // // NOTE: all project options have default values assigned in setDefaults().
// { int k, m, h, s; double tStep; char strDate[25]; DateTime aTime; DateTime aDate; // --- determine which option is being read k = findmatch(s1, OptionWords); if ( k < 0 ) return error_setInpError(ERR_KEYWORD, s1); switch ( k ) { // --- choice of flow units case FLOW_UNITS: m = findmatch(s2, FlowUnitWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); FlowUnits = m; if ( FlowUnits <= MGD ) UnitSystem = US; else UnitSystem = SI; break; // --- choice of infiltration modeling method case INFIL_MODEL: m = findmatch(s2, InfilModelWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); InfilModel = m; break; // --- choice of flow routing method case ROUTE_MODEL: m = findmatch(s2, RouteModelWords); if ( m < 0 ) m = findmatch(s2, OldRouteModelWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); if ( m == NO_ROUTING ) IgnoreRouting = TRUE; else RouteModel = m; if ( RouteModel == EKW ) RouteModel = KW; break; // --- simulation start date case START_DATE: if ( !datetime_strToDate(s2, &StartDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation start time of day case START_TIME: if ( !datetime_strToTime(s2, &StartTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation ending date case END_DATE: if ( !datetime_strToDate(s2, &EndDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation ending time of day case END_TIME: if ( !datetime_strToTime(s2, &EndTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- reporting start date case REPORT_START_DATE: if ( !datetime_strToDate(s2, &ReportStartDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- reporting start time of day case REPORT_START_TIME: if ( !datetime_strToTime(s2, &ReportStartTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- day of year when street sweeping begins or when it ends // (year is arbitrarily set to 1947 so that the dayOfYear // function can be applied) case SWEEP_START: case SWEEP_END: strcpy(strDate, s2); strcat(strDate, "/1947"); if ( !datetime_strToDate(strDate, &aDate) ) { return error_setInpError(ERR_DATETIME, s2); } m = datetime_dayOfYear(aDate); if ( k == SWEEP_START ) SweepStart = m; else SweepEnd = m; break; // --- number of antecedent dry days case START_DRY_DAYS: StartDryDays = atof(s2); if ( StartDryDays < 0.0 ) { return error_setInpError(ERR_NUMBER, s2); } break; // --- runoff or reporting time steps // (input is in hrs:min:sec format, time step saved as seconds) case WET_STEP: case DRY_STEP: case REPORT_STEP: if ( !datetime_strToTime(s2, &aTime) ) { return error_setInpError(ERR_DATETIME, s2); } datetime_decodeTime(aTime, &h, &m, &s); h += 24*(int)aTime; s = s + 60*m + 3600*h; if ( s <= 0 ) return error_setInpError(ERR_NUMBER, s2); switch ( k ) { case WET_STEP: WetStep = s; break; case DRY_STEP: DryStep = s; break; case REPORT_STEP: ReportStep = s; break; } break; // --- type of damping applied to inertial terms of dynamic wave routing case INERT_DAMPING: m = findmatch(s2, InertDampingWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); else InertDamping = m; break; // --- Yes/No options (NO = 0, YES = 1) case ALLOW_PONDING: case SLOPE_WEIGHTING: case SKIP_STEADY_STATE: case IGNORE_RAINFALL: case IGNORE_SNOWMELT: case IGNORE_GWATER: case IGNORE_ROUTING: case IGNORE_QUALITY: case IGNORE_RDII: //(5.1.004) m = findmatch(s2, NoYesWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); switch ( k ) { case ALLOW_PONDING: AllowPonding = m; break; case 
SLOPE_WEIGHTING: SlopeWeighting = m; break; case SKIP_STEADY_STATE: SkipSteadyState = m; break; case IGNORE_RAINFALL: IgnoreRainfall = m; break; case IGNORE_SNOWMELT: IgnoreSnowmelt = m; break; case IGNORE_GWATER: IgnoreGwater = m; break; case IGNORE_ROUTING: IgnoreRouting = m; break; case IGNORE_QUALITY: IgnoreQuality = m; break; case IGNORE_RDII: IgnoreRDII = m; break; //(5.1.004) } break; case NORMAL_FLOW_LTD: m = findmatch(s2, NormalFlowWords); //if ( m < 0 ) m = findmatch(s2, NoYesWords); DEPRECATED //(5.1.012) if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); NormalFlowLtd = m; break; case FORCE_MAIN_EQN: m = findmatch(s2, ForceMainEqnWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); ForceMainEqn = m; break; case LINK_OFFSETS: m = findmatch(s2, LinkOffsetWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); LinkOffsets = m; break; // --- compatibility option for selecting solution method for // dynamic wave flow routing (NOT CURRENTLY USED) case COMPATIBILITY: if ( strcomp(s2, "3") ) Compatibility = SWMM3; else if ( strcomp(s2, "4") ) Compatibility = SWMM4; else if ( strcomp(s2, "5") ) Compatibility = SWMM5; else return error_setInpError(ERR_KEYWORD, s2); break; // --- routing or lengthening time step (in decimal seconds) // (lengthening time step is used in Courant stability formula // to artificially lengthen conduits for dynamic wave flow routing // (a value of 0 means that no lengthening is used)) case ROUTE_STEP: case LENGTHENING_STEP: if ( !getDouble(s2, &tStep) ) { if ( !datetime_strToTime(s2, &aTime) ) { return error_setInpError(ERR_NUMBER, s2); } else { datetime_decodeTime(aTime, &h, &m, &s); h += 24*(int)aTime; s = s + 60*m + 3600*h; tStep = s; } } if ( k == ROUTE_STEP ) { if ( tStep <= 0.0 ) return error_setInpError(ERR_NUMBER, s2); RouteStep = tStep; } else LengtheningStep = MAX(0.0, tStep); break; //// Following code section added to release 5.1.008. //// //(5.1.008) // --- minimum variable time step for dynamic wave routing case MIN_ROUTE_STEP: if ( !getDouble(s2, &MinRouteStep) || MinRouteStep < 0.0 ) return error_setInpError(ERR_NUMBER, s2); break; case NUM_THREADS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); NumThreads = m; break; //// // --- safety factor applied to variable time step estimates under // dynamic wave flow routing (value of 0 indicates that variable // time step option not used) case VARIABLE_STEP: if ( !getDouble(s2, &CourantFactor) ) return error_setInpError(ERR_NUMBER, s2); if ( CourantFactor < 0.0 || CourantFactor > 2.0 ) return error_setInpError(ERR_NUMBER, s2); break; // --- minimum surface area (ft2 or sq. 
meters) associated with nodes // under dynamic wave flow routing case MIN_SURFAREA: MinSurfArea = atof(s2); break; // --- minimum conduit slope (%) case MIN_SLOPE: if ( !getDouble(s2, &MinSlope) ) return error_setInpError(ERR_NUMBER, s2); if ( MinSlope < 0.0 || MinSlope >= 100 ) return error_setInpError(ERR_NUMBER, s2); MinSlope /= 100.0; break; // --- maximum trials / time step for dynamic wave routing case MAX_TRIALS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); MaxTrials = m; break; // --- head convergence tolerance for dynamic wave routing case HEAD_TOL: if ( !getDouble(s2, &HeadTol) ) { return error_setInpError(ERR_NUMBER, s2); } break; // --- steady state tolerance on system inflow - outflow case SYS_FLOW_TOL: if ( !getDouble(s2, &SysFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } SysFlowTol /= 100.0; break; // --- steady state tolerance on nodal lateral inflow case LAT_FLOW_TOL: if ( !getDouble(s2, &LatFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } LatFlowTol /= 100.0; break; case TEMPDIR: // Temporary Directory sstrncpy(TempDir, s2, MAXFNAME); break; } return 0; } //============================================================================= void initPointers() // // Input: none // Output: none // Purpose: assigns NULL to all dynamic arrays for a new project. // { Gage = NULL; Subcatch = NULL; Node = NULL; Outfall = NULL; Divider = NULL; Storage = NULL; Link = NULL; Conduit = NULL; Pump = NULL; Orifice = NULL; Weir = NULL; Outlet = NULL; Pollut = NULL; Landuse = NULL; Pattern = NULL; Curve = NULL; Tseries = NULL; Transect = NULL; Shape = NULL; Aquifer = NULL; UnitHyd = NULL; Snowmelt = NULL; Event = NULL; //(5.1.011) MemPoolAllocated = FALSE; } //============================================================================= void setDefaults() // // Input: none // Output: none // Purpose: assigns default values to project variables. // { int i, j; // Project title & temp. file path for (i = 0; i < MAXTITLE; i++) strcpy(Title[i], ""); strcpy(TempDir, ""); // Interface files Frain.mode = SCRATCH_FILE; // Use scratch rainfall file Fclimate.mode = NO_FILE; Frunoff.mode = NO_FILE; Frdii.mode = NO_FILE; Fhotstart1.mode = NO_FILE; Fhotstart2.mode = NO_FILE; Finflows.mode = NO_FILE; Foutflows.mode = NO_FILE; Frain.file = NULL; Fclimate.file = NULL; Frunoff.file = NULL; Frdii.file = NULL; Fhotstart1.file = NULL; Fhotstart2.file = NULL; Finflows.file = NULL; Foutflows.file = NULL; Fout.file = NULL; Fout.mode = NO_FILE; // Analysis options UnitSystem = US; // US unit system FlowUnits = CFS; // CFS flow units InfilModel = HORTON; // Horton infiltration method RouteModel = KW; // Kin. wave flow routing method AllowPonding = FALSE; // No ponding at nodes InertDamping = SOME; // Partial inertial damping NormalFlowLtd = BOTH; // Default normal flow limitation ForceMainEqn = H_W; // Hazen-Williams eqn. for force mains LinkOffsets = DEPTH_OFFSET; // Use depth for link offsets LengtheningStep = 0; // No lengthening of conduits CourantFactor = 0.0; // No variable time step MinSurfArea = 0.0; // Force use of default min. 
surface area MinSlope = 0.0; // No user supplied minimum conduit slope //(5.1.012) SkipSteadyState = FALSE; // Do flow routing in steady state periods IgnoreRainfall = FALSE; // Analyze rainfall/runoff IgnoreRDII = FALSE; // Analyze RDII //(5.1.004) IgnoreSnowmelt = FALSE; // Analyze snowmelt IgnoreGwater = FALSE; // Analyze groundwater IgnoreRouting = FALSE; // Analyze flow routing IgnoreQuality = FALSE; // Analyze water quality WetStep = 300; // Runoff wet time step (secs) DryStep = 3600; // Runoff dry time step (secs) RouteStep = 300.0; // Routing time step (secs) MinRouteStep = 0.5; // Minimum variable time step (sec) //(5.1.008) ReportStep = 900; // Reporting time step (secs) StartDryDays = 0.0; // Antecedent dry days MaxTrials = 0; // Force use of default max. trials HeadTol = 0.0; // Force use of default head tolerance SysFlowTol = 0.05; // System flow tolerance for steady state LatFlowTol = 0.05; // Lateral flow tolerance for steady state NumThreads = 0; // Number of parallel threads to use NumEvents = 0; // Number of detailed routing events //(5.1.011) // Deprecated options SlopeWeighting = TRUE; // Use slope weighting Compatibility = SWMM4; // Use SWMM 4 up/dn weighting method // Starting & ending date/time StartDate = datetime_encodeDate(2004, 1, 1); StartTime = datetime_encodeTime(0,0,0); StartDateTime = StartDate + StartTime; EndDate = StartDate; EndTime = 0.0; ReportStartDate = NO_DATE; ReportStartTime = NO_DATE; SweepStart = 1; SweepEnd = 365; // Reporting options RptFlags.input = FALSE; RptFlags.continuity = TRUE; RptFlags.flowStats = TRUE; RptFlags.controls = FALSE; RptFlags.subcatchments = FALSE; RptFlags.nodes = FALSE; RptFlags.links = FALSE; RptFlags.nodeStats = FALSE; // Temperature data Temp.dataSource = NO_TEMP; Temp.tSeries = -1; Temp.ta = 70.0; Temp.elev = 0.0; Temp.anglat = 40.0; Temp.dtlong = 0.0; Temp.tmax = MISSING; // Wind speed data Wind.type = MONTHLY_WIND; for ( i=0; i<12; i++ ) Wind.aws[i] = 0.0; // Snowmelt parameters Snow.snotmp = 34.0; Snow.tipm = 0.5; Snow.rnm = 0.6; // Snow areal depletion curves for pervious and impervious surfaces for ( i=0; i<2; i++ ) { for ( j=0; j<10; j++) Snow.adc[i][j] = 1.0; } // Evaporation rates Evap.type = CONSTANT_EVAP; for (i=0; i<12; i++) { Evap.monthlyEvap[i] = 0.0; Evap.panCoeff[i] = 1.0; } Evap.recoveryPattern = -1; Evap.recoveryFactor = 1.0; Evap.tSeries = -1; Evap.dryOnly = FALSE; //// Following code segment added to release 5.1.007. //// //(5.1.007) //// // Climate adjustments for (i = 0; i < 12; i++) { Adjust.temp[i] = 0.0; // additive adjustments Adjust.evap[i] = 0.0; // additive adjustments Adjust.rain[i] = 1.0; // multiplicative adjustments Adjust.hydcon[i] = 1.0; // hyd. conductivity adjustments //(5.1.008) } Adjust.rainFactor = 1.0; Adjust.hydconFactor = 1.0; //(5.1.008) //// } //============================================================================= void openFiles(char *f1, char *f2, char *f3) // // Input: f1 = name of input file // f2 = name of report file // f3 = name of binary output file // Output: none // Purpose: opens a project's input and report files. 
// { // --- initialize file pointers to NULL Finp.file = NULL; Frpt.file = NULL; Fout.file = NULL; // --- save file names sstrncpy(Finp.name, f1, MAXFNAME); sstrncpy(Frpt.name, f2, MAXFNAME); sstrncpy(Fout.name, f3, MAXFNAME); // --- check that file names are not identical if (strcomp(f1, f2) || strcomp(f1, f3) || strcomp(f2, f3)) { writecon(FMT11); ErrorCode = ERR_FILE_NAME; return; } // --- open input and report files if ((Finp.file = fopen(f1,"rt")) == NULL) { writecon(FMT12); writecon(f1); ErrorCode = ERR_INP_FILE; return; } if ((Frpt.file = fopen(f2,"wt")) == NULL) { writecon(FMT13); ErrorCode = ERR_RPT_FILE; return; } } //============================================================================= void createObjects() // // Input: none // Output: none // Purpose: allocates memory for project's objects. // // NOTE: number of each type of object has already been determined in // project_readInput(). // { int j, k; // --- allocate memory for each category of object if ( ErrorCode ) return; Gage = (TGage *) calloc(Nobjects[GAGE], sizeof(TGage)); Subcatch = (TSubcatch *) calloc(Nobjects[SUBCATCH], sizeof(TSubcatch)); Node = (TNode *) calloc(Nobjects[NODE], sizeof(TNode)); Outfall = (TOutfall *) calloc(Nnodes[OUTFALL], sizeof(TOutfall)); Divider = (TDivider *) calloc(Nnodes[DIVIDER], sizeof(TDivider)); Storage = (TStorage *) calloc(Nnodes[STORAGE], sizeof(TStorage)); Link = (TLink *) calloc(Nobjects[LINK], sizeof(TLink)); Conduit = (TConduit *) calloc(Nlinks[CONDUIT], sizeof(TConduit)); Pump = (TPump *) calloc(Nlinks[PUMP], sizeof(TPump)); Orifice = (TOrifice *) calloc(Nlinks[ORIFICE], sizeof(TOrifice)); Weir = (TWeir *) calloc(Nlinks[WEIR], sizeof(TWeir)); Outlet = (TOutlet *) calloc(Nlinks[OUTLET], sizeof(TOutlet)); Pollut = (TPollut *) calloc(Nobjects[POLLUT], sizeof(TPollut)); Landuse = (TLanduse *) calloc(Nobjects[LANDUSE], sizeof(TLanduse)); Pattern = (TPattern *) calloc(Nobjects[TIMEPATTERN], sizeof(TPattern)); Curve = (TTable *) calloc(Nobjects[CURVE], sizeof(TTable)); Tseries = (TTable *) calloc(Nobjects[TSERIES], sizeof(TTable)); Aquifer = (TAquifer *) calloc(Nobjects[AQUIFER], sizeof(TAquifer)); UnitHyd = (TUnitHyd *) calloc(Nobjects[UNITHYD], sizeof(TUnitHyd)); Snowmelt = (TSnowmelt *) calloc(Nobjects[SNOWMELT], sizeof(TSnowmelt)); Shape = (TShape *) calloc(Nobjects[SHAPE], sizeof(TShape)); //// Added to release 5.1.011. 
//// //(5.1.011) // --- create array of detailed routing event periods Event = (TEvent *) calloc(NumEvents+1, sizeof(TEvent)); Event[NumEvents].start = BIG; Event[NumEvents].end = BIG + 1.0; //// // --- create LID objects lid_create(Nobjects[LID], Nobjects[SUBCATCH]); // --- create control rules ErrorCode = controls_create(Nobjects[CONTROL]); if ( ErrorCode ) return; // --- create cross section transects ErrorCode = transect_create(Nobjects[TRANSECT]); if ( ErrorCode ) return; // --- allocate memory for infiltration data infil_create(Nobjects[SUBCATCH], InfilModel); // --- allocate memory for water quality state variables for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].initBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].pondedQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } for (j = 0; j < Nobjects[NODE]; j++) { Node[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].extInflow = NULL; Node[j].dwfInflow = NULL; Node[j].rdiiInflow = NULL; Node[j].treatment = NULL; } for (j = 0; j < Nobjects[LINK]; j++) { Link[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } // --- allocate memory for land use buildup/washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { Landuse[j].buildupFunc = (TBuildup *) calloc(Nobjects[POLLUT], sizeof(TBuildup)); Landuse[j].washoffFunc = (TWashoff *) calloc(Nobjects[POLLUT], sizeof(TWashoff)); } // --- allocate memory for subcatchment landuse factors for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].landFactor = (TLandFactor *) calloc(Nobjects[LANDUSE], sizeof(TLandFactor)); for (k = 0; k < Nobjects[LANDUSE]; k++) { Subcatch[j].landFactor[k].buildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } } // --- initialize buildup & washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { for (k = 0; k < Nobjects[POLLUT]; k++) { Landuse[j].buildupFunc[k].funcType = NO_BUILDUP; Landuse[j].buildupFunc[k].normalizer = PER_AREA; Landuse[j].washoffFunc[k].funcType = NO_WASHOFF; } } // --- initialize rain gage properties for (j = 0; j < Nobjects[GAGE]; j++) { Gage[j].tSeries = -1; strcpy(Gage[j].fname, ""); } // --- initialize subcatchment properties for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].outSubcatch = -1; Subcatch[j].outNode = -1; Subcatch[j].infil = -1; Subcatch[j].groundwater = NULL; Subcatch[j].gwLatFlowExpr = NULL; //(5.1.007) Subcatch[j].gwDeepFlowExpr = NULL; //(5.1.007) Subcatch[j].snowpack = NULL; Subcatch[j].lidArea = 0.0; for (k = 0; k < Nobjects[POLLUT]; k++) { Subcatch[j].initBuildup[k] = 0.0; } } // --- initialize RDII unit hydrograph properties for ( j = 0; j < Nobjects[UNITHYD]; j++ ) rdii_initUnitHyd(j); // --- initialize snowmelt properties for ( j = 0; j < Nobjects[SNOWMELT]; j++ ) snow_initSnowmelt(j); // --- initialize storage node exfiltration //(5.1.007) for (j = 0; j < Nnodes[STORAGE]; j++) Storage[j].exfil = NULL; //(5.1.007) // --- initialize link properties for (j = 0; j < Nobjects[LINK]; j++) { Link[j].xsect.type = -1; Link[j].cLossInlet = 0.0; Link[j].cLossOutlet = 0.0; Link[j].cLossAvg = 0.0; 
Link[j].hasFlapGate = FALSE;
    }
    for (j = 0; j < Nlinks[PUMP]; j++) Pump[j].pumpCurve = -1;

    // --- initialize reporting flags
    for (j = 0; j < Nobjects[SUBCATCH]; j++) Subcatch[j].rptFlag = FALSE;
    for (j = 0; j < Nobjects[NODE]; j++) Node[j].rptFlag = FALSE;
    for (j = 0; j < Nobjects[LINK]; j++) Link[j].rptFlag = FALSE;

    // --- initialize curves, time series, and time patterns
    for (j = 0; j < Nobjects[CURVE]; j++)   table_init(&Curve[j]);
    for (j = 0; j < Nobjects[TSERIES]; j++) table_init(&Tseries[j]);
    for (j = 0; j < Nobjects[TIMEPATTERN]; j++) inflow_initDwfPattern(j);
}

//=============================================================================

void deleteObjects()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for a project's objects.
//
//  NOTE: care is taken to first free objects that are properties of another
//        object before the latter is freed (e.g., we must free a
//        subcatchment's land use factors before freeing the subcatchment).
//
{
    int j, k;

    // --- free memory for landuse factors & groundwater
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        for (k = 0; k < Nobjects[LANDUSE]; k++)
        {
            FREE(Subcatch[j].landFactor[k].buildup);
        }
        FREE(Subcatch[j].landFactor);
        FREE(Subcatch[j].groundwater);
        gwater_deleteFlowExpression(j);
        FREE(Subcatch[j].snowpack);
    }

    // --- free memory for buildup/washoff functions
    if ( Landuse ) for (j = 0; j < Nobjects[LANDUSE]; j++)
    {
        FREE(Landuse[j].buildupFunc);
        FREE(Landuse[j].washoffFunc);
    }

    // --- free memory for water quality state variables
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        FREE(Subcatch[j].initBuildup);
        FREE(Subcatch[j].oldQual);
        FREE(Subcatch[j].newQual);
        FREE(Subcatch[j].pondedQual);
        FREE(Subcatch[j].totalLoad);
    }
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        FREE(Node[j].oldQual);
        FREE(Node[j].newQual);
    }
    if ( Link ) for (j = 0; j < Nobjects[LINK]; j++)
    {
        FREE(Link[j].oldQual);
        FREE(Link[j].newQual);
        FREE(Link[j].totalLoad);
    }

    // --- free memory used for rainfall infiltration
    infil_delete();

//// Added for release 5.1.007. 
//// //(5.1.007) //// // --- free memory used for storage exfiltration if ( Node ) for (j = 0; j < Nnodes[STORAGE]; j++) { if ( Storage[j].exfil ) { FREE(Storage[j].exfil->btmExfil); FREE(Storage[j].exfil->bankExfil); FREE(Storage[j].exfil); } } //// // --- free memory used for outfall pollutants loads //(5.1.008) if ( Node ) for (j = 0; j < Nnodes[OUTFALL]; j++) //(5.1.008) FREE(Outfall[j].wRouted); //(5.1.008) // --- free memory used for nodal inflows & treatment functions if ( Node ) for (j = 0; j < Nobjects[NODE]; j++) { inflow_deleteExtInflows(j); inflow_deleteDwfInflows(j); rdii_deleteRdiiInflow(j); treatmnt_delete(j); } // --- delete table entries for curves and time series if ( Tseries ) for (j = 0; j < Nobjects[TSERIES]; j++) table_deleteEntries(&Tseries[j]); if ( Curve ) for (j = 0; j < Nobjects[CURVE]; j++) table_deleteEntries(&Curve[j]); // --- delete cross section transects transect_delete(); // --- delete control rules controls_delete(); // --- delete LIDs lid_delete(); // --- now free each major category of object FREE(Gage); FREE(Subcatch); FREE(Node); FREE(Outfall); FREE(Divider); FREE(Storage); FREE(Link); FREE(Conduit); FREE(Pump); FREE(Orifice); FREE(Weir); FREE(Outlet); FREE(Pollut); FREE(Landuse); FREE(Pattern); FREE(Curve); FREE(Tseries); FREE(Aquifer); FREE(UnitHyd); FREE(Snowmelt); FREE(Shape); FREE(Event); //(5.1.011) } //============================================================================= void createHashTables() // // Input: none // Output: returns error code // Purpose: allocates memory for object ID hash tables // { int j; MemPoolAllocated = FALSE; for (j = 0; j < MAX_OBJ_TYPES ; j++) { Htable[j] = HTcreate(); if ( Htable[j] == NULL ) report_writeErrorMsg(ERR_MEMORY, ""); } // --- initialize memory pool used to store object ID's if ( AllocInit() == NULL ) report_writeErrorMsg(ERR_MEMORY, ""); else MemPoolAllocated = TRUE; } //============================================================================= void deleteHashTables() // // Input: none // Output: none // Purpose: frees memory allocated for object ID hash tables // { int j; for (j = 0; j < MAX_OBJ_TYPES; j++) { if ( Htable[j] != NULL ) HTfree(Htable[j]); } // --- free object ID memory pool if ( MemPoolAllocated ) AllocFreePool(); } //=============================================================================
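
/* A minimal, self-contained sketch of the ownership discipline that
   deleteObjects() relies on: child allocations are released before the
   parent array that holds them. The FREE() macro used above is defined
   outside this excerpt; the null-guarding definition below, and all the
   Sketch* names, are assumptions for illustration only. */

#include <stdlib.h>

#define SKETCH_FREE(x) { if (x) free(x); (x) = NULL; }

typedef struct { double *buildup; } SketchLandFactor;

typedef struct
{
    SketchLandFactor *landFactor;
    int               nLanduse;
} SketchSubcatch;

static void sketch_deleteSubcatch(SketchSubcatch *s)
{
    int k;
    if ( s == NULL || s->landFactor == NULL ) return;
    for (k = 0; k < s->nLanduse; k++)
    {
        SKETCH_FREE(s->landFactor[k].buildup);    // free children first ...
    }
    SKETCH_FREE(s->landFactor);                   // ... then the parent array
}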
ellipticBuildContinuous.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "elliptic.h" // compare on global indices int parallelCompareRowColumn(const void *a, const void *b){ nonZero_t *fa = (nonZero_t*) a; nonZero_t *fb = (nonZero_t*) b; if(fa->row < fb->row) return -1; if(fa->row > fb->row) return +1; if(fa->col < fb->col) return -1; if(fa->col > fb->col) return +1; return 0; } void ellipticBuildContinuousTri2D (elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts); void ellipticBuildContinuousQuad2D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts); void ellipticBuildContinuousTet3D (elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts); void ellipticBuildContinuousHex3D (elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts); void ellipticBuildContinuous(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { switch(elliptic->elementType){ case TRIANGLES: ellipticBuildContinuousTri2D(elliptic, lambda, A, nnz, ogs, globalStarts); break; case QUADRILATERALS: ellipticBuildContinuousQuad2D(elliptic, lambda, A, nnz, ogs, globalStarts); break; case TETRAHEDRA: ellipticBuildContinuousTet3D(elliptic, lambda, A, nnz, ogs, globalStarts); break; case HEXAHEDRA: ellipticBuildContinuousHex3D(elliptic, lambda, A, nnz, ogs, globalStarts); break; } } void ellipticBuildContinuousTri2D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { mesh2D *mesh = elliptic->mesh; setupAide options = elliptic->options; int rank = mesh->rank; //use the masked gs handle to define a global ordering // number of degrees of freedom on this rank (after gathering) hlong Ngather = elliptic->ogs->Ngather; dlong Ntotal = mesh->Np*mesh->Nelements; // create a global numbering system hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); int *owner = (int *) calloc(Ngather,sizeof(int)); // every gathered degree of freedom has its own global id MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); for(int r=0;r<mesh->size;++r) globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; //use the offsets to set a consecutive global numbering for (dlong n =0;n<elliptic->ogs->Ngather;n++) { globalIds[n] = n + globalStarts[rank]; owner[n] = rank; } //scatter this numbering to the original nodes hlong 
*globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); free(globalIds); free(owner); // Build non-zeros of stiffness matrix (unassembled) dlong nnzLocal = mesh->Np*mesh->Np*mesh->Nelements; nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); dfloat *Srr = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); dfloat *Srs = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); dfloat *Sss = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); dfloat *MM = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); for (int n=0;n<mesh->Np;n++) { for (int m=0;m<mesh->Np;m++) { Srr[m+n*mesh->Np] = mesh->Srr[m+n*mesh->Np]; Srs[m+n*mesh->Np] = mesh->Srs[m+n*mesh->Np] + mesh->Ssr[m+n*mesh->Np]; Sss[m+n*mesh->Np] = mesh->Sss[m+n*mesh->Np]; MM[m+n*mesh->Np] = mesh->MM[m+n*mesh->Np]; } } if(mesh->rank==0) printf("Building full FEM matrix...");fflush(stdout); //Build unassembed non-zeros dlong cnt =0; for (dlong e=0;e<mesh->Nelements;e++) { dfloat Grr = mesh->ggeo[e*mesh->Nggeo + G00ID]; dfloat Grs = mesh->ggeo[e*mesh->Nggeo + G01ID]; dfloat Gss = mesh->ggeo[e*mesh->Nggeo + G11ID]; dfloat J = mesh->ggeo[e*mesh->Nggeo + GWJID]; for (int n=0;n<mesh->Np;n++) { if (globalNumbering[e*mesh->Np + n]<0) continue; //skip masked nodes for (int m=0;m<mesh->Np;m++) { if (globalNumbering[e*mesh->Np + m]<0) continue; //skip masked nodes dfloat val = 0.; val += Grr*Srr[m+n*mesh->Np]; val += Grs*Srs[m+n*mesh->Np]; val += Gss*Sss[m+n*mesh->Np]; val += J*lambda*MM[m+n*mesh->Np]; dfloat nonZeroThreshold = 1e-7; if (fabs(val)>nonZeroThreshold) { // pack non-zero sendNonZeros[cnt].val = val; sendNonZeros[cnt].row = globalNumbering[e*mesh->Np + n]; sendNonZeros[cnt].col = globalNumbering[e*mesh->Np + m]; sendNonZeros[cnt].ownerRank = globalOwners[e*mesh->Np + n]; cnt++; } } } } // Make the MPI_NONZERO_T data type MPI_Datatype MPI_NONZERO_T; MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; int blength[4] = {1, 1, 1, 1}; MPI_Aint addr[4], displ[4]; MPI_Get_address ( &(sendNonZeros[0] ), addr+0); MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); displ[0] = 0; displ[1] = addr[1] - addr[0]; displ[2] = addr[2] - addr[0]; displ[3] = addr[3] - addr[0]; MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); MPI_Type_commit (&MPI_NONZERO_T); // count how many non-zeros to send to each process for(dlong n=0;n<cnt;++n) AsendCounts[sendNonZeros[n].ownerRank]++; // sort by row ordering qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // find how many nodes to expect (should use sparse version) MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // find send and recv offsets for gather *nnz = 0; for(int r=0;r<mesh->size;++r){ AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; *nnz += ArecvCounts[r]; } *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // determine number to receive 
MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T, mesh->comm); // sort received non-zero entries by row block (may need to switch compareRowColumn tests) qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // compress duplicates cnt = 0; for(dlong n=1;n<*nnz;++n){ if((*A)[n].row == (*A)[cnt].row && (*A)[n].col == (*A)[cnt].col){ (*A)[cnt].val += (*A)[n].val; } else{ ++cnt; (*A)[cnt] = (*A)[n]; } } if (*nnz) cnt++; *nnz = cnt; if(mesh->rank==0) printf("done.\n"); MPI_Barrier(mesh->comm); MPI_Type_free(&MPI_NONZERO_T); free(sendNonZeros); free(globalNumbering); free(globalOwners); free(AsendCounts); free(ArecvCounts); free(AsendOffsets); free(ArecvOffsets); free(Srr); free(Srs); free(Sss); free(MM ); } void ellipticBuildContinuousQuad2D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { mesh2D *mesh = elliptic->mesh; setupAide options = elliptic->options; int rank = mesh->rank; //use the masked gs handle to define a global ordering // number of degrees of freedom on this rank (after gathering) hlong Ngather = elliptic->ogs->Ngather; dlong Ntotal = mesh->Np*mesh->Nelements; // create a global numbering system hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); int *owner = (int *) calloc(Ngather,sizeof(int)); // every gathered degree of freedom has its own global id MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); for(int r=0;r<mesh->size;++r) globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; //use the offsets to set a consecutive global numbering for (dlong n =0;n<elliptic->ogs->Ngather;n++) { globalIds[n] = n + globalStarts[rank]; owner[n] = rank; } //scatter this numbering to the original nodes hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); free(globalIds); free(owner); // 2. 
Build non-zeros of stiffness matrix (unassembled) dlong nnzLocal = mesh->Np*mesh->Np*mesh->Nelements; nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *mask = (int *) calloc(mesh->Np*mesh->Nelements,sizeof(int)); for (dlong n=0;n<elliptic->Nmasked;n++) mask[elliptic->maskIds[n]] = 1; if(mesh->rank==0) printf("Building full FEM matrix...");fflush(stdout); //Build unassembed non-zeros dlong cnt =0; for (dlong e=0;e<mesh->Nelements;e++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { if (mask[e*mesh->Np + nx+ny*mesh->Nq]) continue; //skip masked nodes for (int my=0;my<mesh->Nq;my++) { for (int mx=0;mx<mesh->Nq;mx++) { if (mask[e*mesh->Np + mx+my*mesh->Nq]) continue; //skip masked nodes int id; dfloat val = 0.; if (ny==my) { for (int k=0;k<mesh->Nq;k++) { id = k+ny*mesh->Nq; dfloat Grr = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; val += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[mx+k*mesh->Nq]; } } id = mx+ny*mesh->Nq; dfloat Grs = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; val += Grs*mesh->D[nx+mx*mesh->Nq]*mesh->D[my+ny*mesh->Nq]; id = nx+my*mesh->Nq; dfloat Gsr = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; val += Gsr*mesh->D[mx+nx*mesh->Nq]*mesh->D[ny+my*mesh->Nq]; if (nx==mx) { for (int k=0;k<mesh->Nq;k++) { id = nx+k*mesh->Nq; dfloat Gss = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; val += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[my+k*mesh->Nq]; } } if ((nx==mx)&&(ny==my)) { id = nx + ny*mesh->Nq; dfloat JW = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; val += JW*lambda; } dfloat nonZeroThreshold = 1e-7; if (fabs(val)>nonZeroThreshold) { // pack non-zero sendNonZeros[cnt].val = val; sendNonZeros[cnt].row = globalNumbering[e*mesh->Np + nx+ny*mesh->Nq]; sendNonZeros[cnt].col = globalNumbering[e*mesh->Np + mx+my*mesh->Nq]; sendNonZeros[cnt].ownerRank = globalOwners[e*mesh->Np + nx+ny*mesh->Nq]; cnt++; } } } } } } // Make the MPI_NONZERO_T data type MPI_Datatype MPI_NONZERO_T; MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; int blength[4] = {1, 1, 1, 1}; MPI_Aint addr[4], displ[4]; MPI_Get_address ( &(sendNonZeros[0] ), addr+0); MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); displ[0] = 0; displ[1] = addr[1] - addr[0]; displ[2] = addr[2] - addr[0]; displ[3] = addr[3] - addr[0]; MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); MPI_Type_commit (&MPI_NONZERO_T); // count how many non-zeros to send to each process for(dlong n=0;n<cnt;++n) AsendCounts[sendNonZeros[n].ownerRank]++; // sort by row ordering qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // find how many nodes to expect (should use sparse version) MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // find send and recv offsets for gather *nnz = 0; for(int r=0;r<mesh->size;++r){ AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; *nnz += ArecvCounts[r]; } *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // determine number to receive MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, (*A), ArecvCounts, ArecvOffsets, 
MPI_NONZERO_T, mesh->comm); // sort received non-zero entries by row block (may need to switch compareRowColumn tests) qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // compress duplicates cnt = 0; for(dlong n=1;n<*nnz;++n){ if((*A)[n].row == (*A)[cnt].row && (*A)[n].col == (*A)[cnt].col){ (*A)[cnt].val += (*A)[n].val; } else{ ++cnt; (*A)[cnt] = (*A)[n]; } } if (*nnz) cnt++; *nnz = cnt; if(mesh->rank==0) printf("done.\n"); MPI_Barrier(mesh->comm); MPI_Type_free(&MPI_NONZERO_T); free(sendNonZeros); free(globalNumbering); free(globalOwners); free(AsendCounts); free(ArecvCounts); free(AsendOffsets); free(ArecvOffsets); } void ellipticBuildContinuousTet3D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { mesh2D *mesh = elliptic->mesh; setupAide options = elliptic->options; int rank = mesh->rank; //use the masked gs handle to define a global ordering // number of degrees of freedom on this rank (after gathering) hlong Ngather = elliptic->ogs->Ngather; dlong Ntotal = mesh->Np*mesh->Nelements; // create a global numbering system hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); int *owner = (int *) calloc(Ngather,sizeof(int)); // every gathered degree of freedom has its own global id MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); for(int r=0;r<mesh->size;++r) globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; //use the offsets to set a consecutive global numbering for (dlong n =0;n<elliptic->ogs->Ngather;n++) { globalIds[n] = n + globalStarts[rank]; owner[n] = rank; } //scatter this numbering to the original nodes hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); free(globalIds); free(owner); // Build non-zeros of stiffness matrix (unassembled) dlong nnzLocal = mesh->Np*mesh->Np*mesh->Nelements; nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *mask = (int *) calloc(mesh->Np*mesh->Nelements,sizeof(int)); for (dlong n=0;n<elliptic->Nmasked;n++) mask[elliptic->maskIds[n]] = 1; //Build unassembed non-zeros if(mesh->rank==0) printf("Building full FEM matrix...");fflush(stdout); dlong cnt =0; #pragma omp parallel for for (dlong e=0;e<mesh->Nelements;e++) { dfloat Grr = mesh->ggeo[e*mesh->Nggeo + G00ID]; dfloat Grs = mesh->ggeo[e*mesh->Nggeo + G01ID]; dfloat Grt = mesh->ggeo[e*mesh->Nggeo + G02ID]; dfloat Gss = mesh->ggeo[e*mesh->Nggeo + G11ID]; dfloat Gst = mesh->ggeo[e*mesh->Nggeo + G12ID]; dfloat Gtt = mesh->ggeo[e*mesh->Nggeo + G22ID]; dfloat J = mesh->ggeo[e*mesh->Nggeo + GWJID]; for (int n=0;n<mesh->Np;n++) { if (mask[e*mesh->Np + n]) continue; //skip masked nodes for (int m=0;m<mesh->Np;m++) { if (mask[e*mesh->Np + m]) continue; //skip masked nodes dfloat val = 0.; val += Grr*mesh->Srr[m+n*mesh->Np]; val += Grs*mesh->Srs[m+n*mesh->Np]; val += Grt*mesh->Srt[m+n*mesh->Np]; val += Grs*mesh->Ssr[m+n*mesh->Np]; val += Gss*mesh->Sss[m+n*mesh->Np]; val += Gst*mesh->Sst[m+n*mesh->Np]; val += Grt*mesh->Str[m+n*mesh->Np]; val += Gst*mesh->Sts[m+n*mesh->Np]; val += 
Gtt*mesh->Stt[m+n*mesh->Np]; val += J*lambda*mesh->MM[m+n*mesh->Np]; dfloat nonZeroThreshold = 1e-7; if (fabs(val)>nonZeroThreshold) { #pragma omp critical { // pack non-zero sendNonZeros[cnt].val = val; sendNonZeros[cnt].row = globalNumbering[e*mesh->Np + n]; sendNonZeros[cnt].col = globalNumbering[e*mesh->Np + m]; sendNonZeros[cnt].ownerRank = globalOwners[e*mesh->Np + n]; cnt++; } } } } } // Make the MPI_NONZERO_T data type MPI_Datatype MPI_NONZERO_T; MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; int blength[4] = {1, 1, 1, 1}; MPI_Aint addr[4], displ[4]; MPI_Get_address ( &(sendNonZeros[0] ), addr+0); MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); displ[0] = 0; displ[1] = addr[1] - addr[0]; displ[2] = addr[2] - addr[0]; displ[3] = addr[3] - addr[0]; MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); MPI_Type_commit (&MPI_NONZERO_T); // count how many non-zeros to send to each process for(dlong n=0;n<cnt;++n) AsendCounts[sendNonZeros[n].ownerRank] += 1; // sort by row ordering qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // find how many nodes to expect (should use sparse version) MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // find send and recv offsets for gather *nnz = 0; for(int r=0;r<mesh->size;++r){ AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; *nnz += ArecvCounts[r]; } *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // determine number to receive MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T, mesh->comm); // sort received non-zero entries by row block (may need to switch compareRowColumn tests) qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // compress duplicates cnt = 0; for(dlong n=1;n<*nnz;++n){ if((*A)[n].row == (*A)[cnt].row && (*A)[n].col == (*A)[cnt].col){ (*A)[cnt].val += (*A)[n].val; } else{ ++cnt; (*A)[cnt] = (*A)[n]; } } if (*nnz) cnt++; *nnz = cnt; if(mesh->rank==0) printf("done.\n"); MPI_Barrier(mesh->comm); MPI_Type_free(&MPI_NONZERO_T); free(sendNonZeros); free(globalNumbering); free(globalOwners); free(AsendCounts); free(ArecvCounts); free(AsendOffsets); free(ArecvOffsets); free(mask); } void ellipticBuildContinuousHex3D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { mesh2D *mesh = elliptic->mesh; setupAide options = elliptic->options; int rank = mesh->rank; //use the masked gs handle to define a global ordering // number of degrees of freedom on this rank (after gathering) hlong Ngather = elliptic->ogs->Ngather; dlong Ntotal = mesh->Np*mesh->Nelements; // create a global numbering system hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); int *owner = (int *) calloc(Ngather,sizeof(int)); // every gathered degree of freedom has its own global id MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); for(int r=0;r<mesh->size;++r) globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; //use the offsets to set a consecutive global numbering for (dlong n =0;n<elliptic->ogs->Ngather;n++) { globalIds[n] = n + globalStarts[rank]; owner[n] = rank; } //scatter this numbering to the original nodes hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); for (dlong 
n=0;n<Ntotal;n++) globalNumbering[n] = -1; ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); free(globalIds); free(owner); // 2. Build non-zeros of stiffness matrix (unassembled) dlong nnzLocal = mesh->Np*mesh->Np*mesh->Nelements; nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); int *mask = (int *) calloc(mesh->Np*mesh->Nelements,sizeof(int)); for (dlong n=0;n<elliptic->Nmasked;n++) mask[elliptic->maskIds[n]] = 1; if(mesh->rank==0) printf("Building full FEM matrix...");fflush(stdout); dlong cnt =0; for (dlong e=0;e<mesh->Nelements;e++) { for (int nz=0;nz<mesh->Nq;nz++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int idn = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; if (mask[e*mesh->Np + idn]) continue; //skip masked nodes for (int mz=0;mz<mesh->Nq;mz++) { for (int my=0;my<mesh->Nq;my++) { for (int mx=0;mx<mesh->Nq;mx++) { int idm = mx+my*mesh->Nq+mz*mesh->Nq*mesh->Nq; if (mask[e*mesh->Np + idm]) continue; //skip masked nodes int id; dfloat val = 0.; if ((ny==my)&&(nz==mz)) { for (int k=0;k<mesh->Nq;k++) { id = k+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grr = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; val += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[mx+k*mesh->Nq]; } } if (nz==mz) { id = mx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grs = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; val += Grs*mesh->D[nx+mx*mesh->Nq]*mesh->D[my+ny*mesh->Nq]; id = nx+my*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gsr = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; val += Gsr*mesh->D[mx+nx*mesh->Nq]*mesh->D[ny+my*mesh->Nq]; } if (ny==my) { id = mx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grt = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G02ID*mesh->Np]; val += Grt*mesh->D[nx+mx*mesh->Nq]*mesh->D[mz+nz*mesh->Nq]; id = nx+ny*mesh->Nq+mz*mesh->Nq*mesh->Nq; dfloat Gst = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G02ID*mesh->Np]; val += Gst*mesh->D[mx+nx*mesh->Nq]*mesh->D[nz+mz*mesh->Nq]; } if ((nx==mx)&&(nz==mz)) { for (int k=0;k<mesh->Nq;k++) { id = nx+k*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gss = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; val += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[my+k*mesh->Nq]; } } if (nx==mx) { id = nx+my*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gst = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G12ID*mesh->Np]; val += Gst*mesh->D[ny+my*mesh->Nq]*mesh->D[mz+nz*mesh->Nq]; id = nx+ny*mesh->Nq+mz*mesh->Nq*mesh->Nq; dfloat Gts = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G12ID*mesh->Np]; val += Gts*mesh->D[my+ny*mesh->Nq]*mesh->D[nz+mz*mesh->Nq]; } if ((nx==mx)&&(ny==my)) { for (int k=0;k<mesh->Nq;k++) { id = nx+ny*mesh->Nq+k*mesh->Nq*mesh->Nq; dfloat Gtt = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G22ID*mesh->Np]; val += Gtt*mesh->D[nz+k*mesh->Nq]*mesh->D[mz+k*mesh->Nq]; } } if ((nx==mx)&&(ny==my)&&(nz==mz)) { id = nx + ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat JW = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; val += JW*lambda; } // pack non-zero dfloat nonZeroThreshold = 1e-7; if (fabs(val) >= nonZeroThreshold) { sendNonZeros[cnt].val = val; sendNonZeros[cnt].row = globalNumbering[e*mesh->Np + idn]; sendNonZeros[cnt].col = globalNumbering[e*mesh->Np + idm]; sendNonZeros[cnt].ownerRank = 
globalOwners[e*mesh->Np + idn]; cnt++; } } } } } } } } // Make the MPI_NONZERO_T data type MPI_Datatype MPI_NONZERO_T; MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; int blength[4] = {1, 1, 1, 1}; MPI_Aint addr[4], displ[4]; MPI_Get_address ( &(sendNonZeros[0] ), addr+0); MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); displ[0] = 0; displ[1] = addr[1] - addr[0]; displ[2] = addr[2] - addr[0]; displ[3] = addr[3] - addr[0]; MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); MPI_Type_commit (&MPI_NONZERO_T); // count how many non-zeros to send to each process for(dlong n=0;n<cnt;++n) AsendCounts[sendNonZeros[n].ownerRank]++; // sort by row ordering qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // find how many nodes to expect (should use sparse version) MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // find send and recv offsets for gather *nnz = 0; for(int r=0;r<mesh->size;++r){ AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; *nnz += ArecvCounts[r]; } *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // determine number to receive MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T, mesh->comm); // sort received non-zero entries by row block (may need to switch compareRowColumn tests) qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // compress duplicates cnt = 0; for(dlong n=1;n<*nnz;++n){ if((*A)[n].row == (*A)[cnt].row && (*A)[n].col == (*A)[cnt].col){ (*A)[cnt].val += (*A)[n].val; } else{ ++cnt; (*A)[cnt] = (*A)[n]; } } if (*nnz) cnt++; *nnz = cnt; if(mesh->rank==0) printf("done.\n"); MPI_Barrier(mesh->comm); MPI_Type_free(&MPI_NONZERO_T); free(sendNonZeros); free(globalNumbering); free(globalOwners); free(AsendCounts); free(ArecvCounts); free(AsendOffsets); free(ArecvOffsets); }
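
/* All four builders above finish assembly with the same idiom: qsort the
   COO triples by (row, col) using parallelCompareRowColumn, then merge
   duplicate entries in place. A standalone sketch of that compression step;
   the simplified coo_t type and the cooCompare/cooCompress names are
   illustrative, not part of elliptic.h. */

#include <stdlib.h>

typedef struct { long long row, col; double val; } coo_t;

static int cooCompare(const void *a, const void *b)
{
  const coo_t *fa = (const coo_t*) a;
  const coo_t *fb = (const coo_t*) b;
  if (fa->row != fb->row) return (fa->row < fb->row) ? -1 : +1;
  if (fa->col != fb->col) return (fa->col < fb->col) ? -1 : +1;
  return 0;
}

// Sorts A by (row,col) and accumulates duplicates; returns the new length.
static long long cooCompress(coo_t *A, long long nnz)
{
  long long n, cnt = 0;
  if (nnz == 0) return 0;
  qsort(A, nnz, sizeof(coo_t), cooCompare);
  for (n = 1; n < nnz; ++n) {
    if (A[n].row == A[cnt].row && A[n].col == A[cnt].col)
      A[cnt].val += A[n].val;   // duplicate (row,col): accumulate the value
    else
      A[++cnt] = A[n];          // new (row,col): advance the write cursor
  }
  return cnt + 1;               // cnt is the index of the last kept entry
}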
single.c
#include <omp.h>

#define N 10

int main(int argc, char *argv[])
{
    double a[N];
    int i;

    /* Only one thread initializes the array; every other thread skips
       the guarded block. */
    #pragma omp parallel
    //#pragma omp single private(i)
    if (omp_get_thread_num() == 0)
    {
        for (i = 0; i < N; i++)
            a[i] = 0;
    }

    return 0;
}
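
/* single.c serializes the initialization by testing the thread id. The
   commented-out directive points at the more idiomatic alternative:
   "#pragma omp single" hands the block to exactly one thread (not
   necessarily thread 0) and ends with an implied barrier, so every thread
   sees the initialized array afterwards. A sketch of that variant; M and
   init_once are illustrative names, not from the file above. */

#include <omp.h>

#define M 10

static void init_once(double b[M])
{
    #pragma omp parallel
    {
        #pragma omp single
        {
            int i;
            for (i = 0; i < M; i++)
                b[i] = 0;
        }   /* implicit barrier: no thread proceeds until b is initialized */
    }
}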
statistic.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/animate.h" #include "magick/animate.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
% % The format of the EvaluateImageChannel method is: % % MagickBooleanType EvaluateImage(Image *image, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImages(Image *images, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImageChannel(Image *image, % const ChannelType channel,const MagickEvaluateOperator op, % const double value,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o op: A channel op. % % o value: A value value. % % o exception: return any errors or warnings in this structure. % */ static MagickPixelPacket **DestroyPixelThreadSet(const Image *images, MagickPixelPacket **pixels) { register ssize_t i; size_t rows; assert(pixels != (MagickPixelPacket **) NULL); rows=MagickMax(GetImageListLength(images), (size_t) GetMagickResourceLimit(ThreadResource)); for (i=0; i < (ssize_t) rows; i++) if (pixels[i] != (MagickPixelPacket *) NULL) pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static MagickPixelPacket **AcquirePixelThreadSet(const Image *images) { const Image *next; MagickPixelPacket **pixels; register ssize_t i, j; size_t columns, rows; rows=MagickMax(GetImageListLength(images), (size_t) GetMagickResourceLimit(ThreadResource)); pixels=(MagickPixelPacket **) AcquireQuantumMemory(rows,sizeof(*pixels)); if (pixels == (MagickPixelPacket **) NULL) return((MagickPixelPacket **) NULL); (void) memset(pixels,0,rows*sizeof(*pixels)); columns=GetImageListLength(images); for (next=images; next != (Image *) NULL; next=next->next) columns=MagickMax(next->columns,columns); for (i=0; i < (ssize_t) rows; i++) { pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(columns, sizeof(**pixels)); if (pixels[i] == (MagickPixelPacket *) NULL) return(DestroyPixelThreadSet(images,pixels)); for (j=0; j < (ssize_t) columns; j++) GetMagickPixelPacket(images,&pixels[i][j]); } return(pixels); } static inline double EvaluateMax(const double x,const double y) { if (x > y) return(x); return(y); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const MagickPixelPacket *color_1, *color_2; int intensity; color_1=(const MagickPixelPacket *) x; color_2=(const MagickPixelPacket *) y; intensity=(int) MagickPixelIntensity(color_2)-(int) MagickPixelIntensity(color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info, const Quantum pixel,const MagickEvaluateOperator op, const MagickRealType value) { MagickRealType result; register ssize_t i; result=0.0; switch (op) { case UndefinedEvaluateOperator: break; case AbsEvaluateOperator: { result=(MagickRealType) fabs((double) (pixel+value)); break; } case AddEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case AddModulusEvaluateOperator: { /* This returns a 'floored modulus' of the addition which is a positive result. It differs from % or fmod() which returns a 'truncated modulus' result, where floor() is replaced by trunc() and could return a negative result (which is clipped). 
*/ result=pixel+value; result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0)); break; } case AndEvaluateOperator: { result=(MagickRealType) ((ssize_t) pixel & (ssize_t) (value+0.5)); break; } case CosineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case DivideEvaluateOperator: { result=pixel/(value == 0.0 ? 1.0 : value); break; } case ExponentialEvaluateOperator: { result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale* pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, ImpulseNoise,value); break; } case LaplacianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(double) pixel; for (i=0; i < (ssize_t) value; i++) result*=2.0; break; } case LogEvaluateOperator: { if ((QuantumScale*pixel) >= MagickEpsilon) result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value* pixel+1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(MagickRealType) EvaluateMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MedianEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MinEvaluateOperator: { result=(MagickRealType) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(MagickRealType) (value*pixel); break; } case OrEvaluateOperator: { result=(MagickRealType) ((ssize_t) pixel | (ssize_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, PoissonNoise,value); break; } case PowEvaluateOperator: { if (pixel < 0) result=(MagickRealType) -(QuantumRange*pow((double) -(QuantumScale* pixel),(double) value)); else result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel), (double) value)); break; } case RightShiftEvaluateOperator: { result=(MagickRealType) pixel; for (i=0; i < (ssize_t) value; i++) result/=2.0; break; } case RootMeanSquareEvaluateOperator: { result=((MagickRealType) pixel*pixel+value); break; } case SetEvaluateOperator: { result=value; break; } case SineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(MagickRealType) (pixel-value); break; } case SumEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case ThresholdEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel > value) ? 
QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, UniformNoise,value); break; } case XorEvaluateOperator: { result=(MagickRealType) ((ssize_t) pixel ^ (ssize_t) (value+0.5)); break; } } return(result); } static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception) { const Image *p, *q; size_t columns, number_channels, rows; q=images; columns=images->columns; rows=images->rows; number_channels=0; for (p=images; p != (Image *) NULL; p=p->next) { size_t channels; channels=3; if (p->matte != MagickFalse) channels+=1; if (p->colorspace == CMYKColorspace) channels+=1; if (channels > number_channels) { number_channels=channels; q=p; } if (p->columns > columns) columns=p->columns; if (p->rows > rows) rows=p->rows; } return(CloneImage(q,columns,rows,MagickTrue,exception)); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { MagickBooleanType status; status=EvaluateImageChannel(image,CompositeChannels,op,value,exception); return(status); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; Image *image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket **magick_restrict evaluate_pixels, zero; RandomInfo **magick_restrict random_info; size_t number_images; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); image=DestroyImage(image); return((Image *) NULL); } evaluate_pixels=AcquirePixelThreadSet(images); if (evaluate_pixels == (MagickPixelPacket **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. 
*/
  status=MagickTrue;
  progress=0;
  number_images=GetImageListLength(images);
  GetMagickPixelPacket(images,&zero);
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView *image_view;
        const Image *next;
        const int id = GetOpenMPThreadId();
        register IndexPacket *magick_restrict evaluate_indexes;
        register MagickPixelPacket *evaluate_pixel;
        register PixelPacket *magick_restrict q;
        register ssize_t x;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t i;

          for (i=0; i < (ssize_t) number_images; i++)
            evaluate_pixel[i]=zero;
          next=images;
          for (i=0; i < (ssize_t) number_images; i++)
          {
            register const IndexPacket *indexes;
            register const PixelPacket *p;

            image_view=AcquireVirtualCacheView(next,exception);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const PixelPacket *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),op,evaluate_pixel[i].red);
            evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),op,evaluate_pixel[i].green);
            evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),op,evaluate_pixel[i].blue);
            evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
            if (image->colorspace == CMYKColorspace)
              evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
                *indexes,op,evaluate_pixel[i].index);
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
          SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[i/2].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView *image_view;
        const Image *next;
        const int id = GetOpenMPThreadId();
        register IndexPacket *magick_restrict evaluate_indexes;
        register ssize_t i, x;
        register MagickPixelPacket *evaluate_pixel;
        register PixelPacket *magick_restrict q;

        if (status ==
MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) evaluate_pixel[x]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1, exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id], GetPixelRed(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].red); evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id], GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].green); evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id], GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].blue); evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id], GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].opacity); if (image->colorspace == CMYKColorspace) evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id], GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].index); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (op == MeanEvaluateOperator) for (x=0; x < (ssize_t) image->columns; x++) { evaluate_pixel[x].red/=number_images; evaluate_pixel[x].green/=number_images; evaluate_pixel[x].blue/=number_images; evaluate_pixel[x].opacity/=number_images; evaluate_pixel[x].index/=number_images; } if (op == RootMeanSquareEvaluateOperator) for (x=0; x < (ssize_t) image->columns; x++) { evaluate_pixel[x].red=sqrt((double) evaluate_pixel[x].red/ number_images); evaluate_pixel[x].green=sqrt((double) evaluate_pixel[x].green/ number_images); evaluate_pixel[x].blue=sqrt((double) evaluate_pixel[x].blue/ number_images); evaluate_pixel[x].opacity=sqrt((double) evaluate_pixel[x].opacity/ number_images); evaluate_pixel[x].index=sqrt((double) evaluate_pixel[x].index/ number_images); } if (op == MultiplyEvaluateOperator) for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) (number_images-1); j++) { evaluate_pixel[x].red*=(MagickRealType) QuantumScale; evaluate_pixel[x].green*=(MagickRealType) QuantumScale; evaluate_pixel[x].blue*=(MagickRealType) QuantumScale; evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale; evaluate_pixel[x].index*=(MagickRealType) QuantumScale; } } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red)); SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green)); SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue)); SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(evaluate_indexes+x,ClampToQuantum( evaluate_pixel[x].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(images,EvaluateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } 
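  /* Both the median path and the accumulate path converge here: release the
     cache view, the per-thread pixel buffers, and the per-thread RNGs; on
     failure the canvas image is destroyed and NULL is returned. */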
evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) image=DestroyImage(image); return(image); } MagickExport MagickBooleanType EvaluateImageChannel(Image *image, const ChannelType channel,const MagickEvaluateOperator op,const double value, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType result; if ((channel & RedChannel) != 0) { result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value); if (op == MeanEvaluateOperator) result/=2.0; SetPixelRed(q,ClampToQuantum(result)); } if ((channel & GreenChannel) != 0) { result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op, value); if (op == MeanEvaluateOperator) result/=2.0; SetPixelGreen(q,ClampToQuantum(result)); } if ((channel & BlueChannel) != 0) { result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op, value); if (op == MeanEvaluateOperator) result/=2.0; SetPixelBlue(q,ClampToQuantum(result)); } if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) { result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q), op,value); if (op == MeanEvaluateOperator) result/=2.0; SetPixelOpacity(q,ClampToQuantum(result)); } else { result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q), op,value); if (op == MeanEvaluateOperator) result/=2.0; SetPixelAlpha(q,ClampToQuantum(result)); } } if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL)) { result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x), op,value); if (op == MeanEvaluateOperator) result/=2.0; SetPixelIndex(indexes+x,ClampToQuantum(result)); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F u n c t i o n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FunctionImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. % % The format of the FunctionImageChannel method is: % % MagickBooleanType FunctionImage(Image *image, % const MagickFunction function,const ssize_t number_parameters, % const double *parameters,ExceptionInfo *exception) % MagickBooleanType FunctionImageChannel(Image *image, % const ChannelType channel,const MagickFunction function, % const ssize_t number_parameters,const double *argument, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o function: A channel function. % % o parameters: one or more parameters. % % o exception: return any errors or warnings in this structure. % */ static Quantum ApplyFunction(Quantum pixel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { MagickRealType result; register ssize_t i; (void) exception; result=0.0; switch (function) { case PolynomialFunction: { /* * Polynomial * Parameters: polynomial constants, highest to lowest order * For example: c0*x^3 + c1*x^2 + c2*x + c3 */ result=0.0; for (i=0; i < (ssize_t) number_parameters; i++) result=result*QuantumScale*pixel + parameters[i]; result*=QuantumRange; break; } case SinusoidFunction: { /* Sinusoid Function * Parameters: Freq, Phase, Ampl, bias */ double freq,phase,ampl,bias; freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0; phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0; ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI* (freq*QuantumScale*pixel + phase/360.0) )) + bias ) ); break; } case ArcsinFunction: { /* Arcsin Function (peged at range limits for invalid results) * Parameters: Width, Center, Range, Bias */ double width,range,center,bias; width = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result = 2.0/width*(QuantumScale*pixel - center); if ( result <= -1.0 ) result = bias - range/2.0; else if ( result >= 1.0 ) result = bias + range/2.0; else result=(MagickRealType) (range/MagickPI*asin((double) result)+bias); result *= QuantumRange; break; } case ArctanFunction: { /* Arctan Function * Parameters: Slope, Center, Range, Bias */ double slope,range,center,bias; slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? 
        parameters[3] : 0.5;
      result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(MagickRealType) (QuantumRange*(range/MagickPI*
        atan((double) result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}

MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception);
  return(status);
}

MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  status=AccelerateFunctionImage(image,channel,function,number_parameters,
    parameters,exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),
              function,number_parameters,parameters,exception));
        }
      if (((channel & IndexChannel) != 0) &&
          (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),
          function,number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          progress is shared across the OpenMP team; update it atomically.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l E n t r o p y                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelEntropy() returns the entropy of one or more image channels.
%
%  The format of the GetImageChannelEntropy method is:
%
%      MagickBooleanType GetImageChannelEntropy(const Image *image,
%        const ChannelType channel,double *entropy,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o entropy: the average entropy of the selected channels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelEntropy(image,CompositeChannels,entropy,exception);
  return(status);
}

MagickExport MagickBooleanType GetImageChannelEntropy(const Image *image,
  const ChannelType channel,double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].entropy=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[RedChannel].entropy;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[GreenChannel].entropy;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlueChannel].entropy;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[OpacityChannel].entropy;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlackChannel].entropy;
      channels++;
    }
  channel_statistics[CompositeChannels].entropy/=channels;
  *entropy=channel_statistics[CompositeChannels].entropy;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e C h a n n e l E x t r e m a                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelExtrema() returns the extrema of one or more image channels.
%
%  The format of the GetImageChannelExtrema method is:
%
%      MagickBooleanType GetImageChannelExtrema(const Image *image,
%        const ChannelType channel,size_t *minima,size_t *maxima,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o minima: the minimum value in the channel.
%
%    o maxima: the maximum value in the channel.
%
%    o exception: return any errors or warnings in this structure.
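%
%  For example (an illustrative sketch; error handling omitted), the extrema
%  of the red channel can be queried with:
%
%      size_t minima, maxima;
%
%      (void) GetImageChannelExtrema(image,RedChannel,&minima,&maxima,
%        exception);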
% */ MagickExport MagickBooleanType GetImageExtrema(const Image *image, size_t *minima,size_t *maxima,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelExtrema(image,CompositeChannels,minima,maxima, exception); return(status); } MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image, const ChannelType channel,size_t *minima,size_t *maxima, ExceptionInfo *exception) { double max, min; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageChannelRange(image,channel,&min,&max,exception); *minima=(size_t) ceil(min-0.5); *maxima=(size_t) floor(max+0.5); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l K u r t o s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelKurtosis() returns the kurtosis and skewness of one or more % image channels. % % The format of the GetImageChannelKurtosis method is: % % MagickBooleanType GetImageChannelKurtosis(const Image *image, % const ChannelType channel,double *kurtosis,double *skewness, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o kurtosis: the kurtosis of the channel. % % o skewness: the skewness of the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageKurtosis(const Image *image, double *kurtosis,double *skewness,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness, exception); return(status); } MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image, const ChannelType channel,double *kurtosis,double *skewness, ExceptionInfo *exception) { double area, mean, standard_deviation, sum_squares, sum_cubes, sum_fourth_power; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *kurtosis=0.0; *skewness=0.0; area=0.0; mean=0.0; standard_deviation=0.0; sum_squares=0.0; sum_cubes=0.0; sum_fourth_power=0.0; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { mean+=GetPixelRed(p); sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p); sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p); sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)* GetPixelRed(p)*GetPixelRed(p); area++; } if ((channel & GreenChannel) != 0) { mean+=GetPixelGreen(p); sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p); sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)* GetPixelGreen(p); sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)* GetPixelGreen(p)*GetPixelGreen(p); area++; } if ((channel & BlueChannel) != 0) { mean+=GetPixelBlue(p); sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p); sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p); 
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Use the alpha value consistently in all four moment sums.
          */
          mean+=GetPixelAlpha(p);
          sum_squares+=(double) GetPixelAlpha(p)*GetPixelAlpha(p);
          sum_cubes+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p);
          sum_fourth_power+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p)*GetPixelAlpha(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          double
            index;

          index=(double) GetPixelIndex(indexes+x);
          mean+=index;
          sum_squares+=index*index;
          sum_cubes+=index*index*index;
          sum_fourth_power+=index*index*index*index;
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  standard_deviation=sqrt(sum_squares-(mean*mean));
  if (standard_deviation != 0.0)
    {
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*
        sum_squares-3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l M e a n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelMean() returns the mean and standard deviation of one or
%  more image channels.
%
%  The format of the GetImageChannelMean method is:
%
%      MagickBooleanType GetImageChannelMean(const Image *image,
%        const ChannelType channel,double *mean,double *standard_deviation,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o mean: the average value in the channel.
%
%    o standard_deviation: the standard deviation of the channel.
%
%    o exception: return any errors or warnings in this structure.
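%
%  A typical call (an illustrative sketch; error handling omitted):
%
%      double mean, standard_deviation;
%
%      (void) GetImageChannelMean(image,GreenChannel,&mean,
%        &standard_deviation,exception);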
%
*/

MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception);
  return(status);
}

MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].standard_deviation;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].standard_deviation;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].standard_deviation;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].mean+=
        (QuantumRange-channel_statistics[OpacityChannel].mean);
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].standard_deviation;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].standard_deviation;
      channels++;
    }
  channel_statistics[CompositeChannels].mean/=channels;
  channel_statistics[CompositeChannels].standard_deviation/=channels;
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l M o m e n t s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelMoments() returns the normalized moments of one or more
%  image channels.
%
%  The format of the GetImageChannelMoments method is:
%
%      ChannelMoments *GetImageChannelMoments(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
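%
%  For example (an illustrative sketch), fetch the first Hu invariant of the
%  red channel and free the buffer when done:
%
%      ChannelMoments *moments;
%
%      moments=GetImageChannelMoments(image,exception);
%      if (moments != (ChannelMoments *) NULL)
%        {
%          double hu1 = moments[RedChannel].I[0];
%          moments=(ChannelMoments *) RelinquishMagickMemory(moments);
%        }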
% */ MagickExport ChannelMoments *GetImageChannelMoments(const Image *image, ExceptionInfo *exception) { #define MaxNumberImageMoments 8 ChannelMoments *channel_moments; double M00[CompositeChannels+1], M01[CompositeChannels+1], M02[CompositeChannels+1], M03[CompositeChannels+1], M10[CompositeChannels+1], M11[CompositeChannels+1], M12[CompositeChannels+1], M20[CompositeChannels+1], M21[CompositeChannels+1], M22[CompositeChannels+1], M30[CompositeChannels+1]; MagickPixelPacket pixel; PointInfo centroid[CompositeChannels+1]; ssize_t channel, channels, y; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); length=CompositeChannels+1UL; channel_moments=(ChannelMoments *) AcquireQuantumMemory(length, sizeof(*channel_moments)); if (channel_moments == (ChannelMoments *) NULL) return(channel_moments); (void) memset(channel_moments,0,length*sizeof(*channel_moments)); (void) memset(centroid,0,sizeof(centroid)); (void) memset(M00,0,sizeof(M00)); (void) memset(M01,0,sizeof(M01)); (void) memset(M02,0,sizeof(M02)); (void) memset(M03,0,sizeof(M03)); (void) memset(M10,0,sizeof(M10)); (void) memset(M11,0,sizeof(M11)); (void) memset(M12,0,sizeof(M12)); (void) memset(M20,0,sizeof(M20)); (void) memset(M21,0,sizeof(M21)); (void) memset(M22,0,sizeof(M22)); (void) memset(M30,0,sizeof(M30)); GetMagickPixelPacket(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; /* Compute center of mass (centroid). */ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); M00[RedChannel]+=QuantumScale*pixel.red; M10[RedChannel]+=x*QuantumScale*pixel.red; M01[RedChannel]+=y*QuantumScale*pixel.red; M00[GreenChannel]+=QuantumScale*pixel.green; M10[GreenChannel]+=x*QuantumScale*pixel.green; M01[GreenChannel]+=y*QuantumScale*pixel.green; M00[BlueChannel]+=QuantumScale*pixel.blue; M10[BlueChannel]+=x*QuantumScale*pixel.blue; M01[BlueChannel]+=y*QuantumScale*pixel.blue; if (image->matte != MagickFalse) { M00[OpacityChannel]+=QuantumScale*pixel.opacity; M10[OpacityChannel]+=x*QuantumScale*pixel.opacity; M01[OpacityChannel]+=y*QuantumScale*pixel.opacity; } if (image->colorspace == CMYKColorspace) { M00[IndexChannel]+=QuantumScale*pixel.index; M10[IndexChannel]+=x*QuantumScale*pixel.index; M01[IndexChannel]+=y*QuantumScale*pixel.index; } p++; } } for (channel=0; channel <= CompositeChannels; channel++) { /* Compute center of mass (centroid). */ if (M00[channel] < MagickEpsilon) { M00[channel]+=MagickEpsilon; centroid[channel].x=(double) image->columns/2.0; centroid[channel].y=(double) image->rows/2.0; continue; } M00[channel]+=MagickEpsilon; centroid[channel].x=M10[channel]/M00[channel]; centroid[channel].y=M01[channel]/M00[channel]; } for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; /* Compute the image moments. 
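      (This second pass accumulates the central moments per channel,
      M_pq = sum((x-cx)^p*(y-cy)^q*QuantumScale*intensity), about the
      centroid (cx,cy) computed in the first pass.)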
*/ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); M11[RedChannel]+=(x-centroid[RedChannel].x)*(y- centroid[RedChannel].y)*QuantumScale*pixel.red; M20[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*QuantumScale*pixel.red; M02[RedChannel]+=(y-centroid[RedChannel].y)*(y- centroid[RedChannel].y)*QuantumScale*pixel.red; M21[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale* pixel.red; M12[RedChannel]+=(x-centroid[RedChannel].x)*(y- centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale* pixel.red; M22[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y- centroid[RedChannel].y)*QuantumScale*pixel.red; M30[RedChannel]+=(x-centroid[RedChannel].x)*(x- centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale* pixel.red; M03[RedChannel]+=(y-centroid[RedChannel].y)*(y- centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale* pixel.red; M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y- centroid[GreenChannel].y)*QuantumScale*pixel.green; M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*QuantumScale*pixel.green; M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y- centroid[GreenChannel].y)*QuantumScale*pixel.green; M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale* pixel.green; M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y- centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale* pixel.green; M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y- centroid[GreenChannel].y)*QuantumScale*pixel.green; M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x- centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale* pixel.green; M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y- centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale* pixel.green; M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y- centroid[BlueChannel].y)*QuantumScale*pixel.blue; M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*QuantumScale*pixel.blue; M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y- centroid[BlueChannel].y)*QuantumScale*pixel.blue; M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale* pixel.blue; M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y- centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale* pixel.blue; M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y- centroid[BlueChannel].y)*QuantumScale*pixel.blue; M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x- centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale* pixel.blue; M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y- centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale* pixel.blue; if (image->matte != MagickFalse) { M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y- centroid[OpacityChannel].y)*QuantumScale*pixel.opacity; M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*QuantumScale*pixel.opacity; M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y- centroid[OpacityChannel].y)*QuantumScale*pixel.opacity; 
M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)* QuantumScale*pixel.opacity; M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y- centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)* QuantumScale*pixel.opacity; M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y- centroid[OpacityChannel].y)*QuantumScale*pixel.opacity; M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x- centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)* QuantumScale*pixel.opacity; M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y- centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)* QuantumScale*pixel.opacity; } if (image->colorspace == CMYKColorspace) { M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y- centroid[IndexChannel].y)*QuantumScale*pixel.index; M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*QuantumScale*pixel.index; M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y- centroid[IndexChannel].y)*QuantumScale*pixel.index; M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)* QuantumScale*pixel.index; M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y- centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)* QuantumScale*pixel.index; M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y- centroid[IndexChannel].y)*QuantumScale*pixel.index; M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x- centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)* QuantumScale*pixel.index; M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y- centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)* QuantumScale*pixel.index; } p++; } } channels=3; M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]); M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]); M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]); M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]); M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]); M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]); M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]); M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]); M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]); M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]); M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]); if (image->matte != MagickFalse) { channels+=1; M00[CompositeChannels]+=M00[OpacityChannel]; M01[CompositeChannels]+=M01[OpacityChannel]; M02[CompositeChannels]+=M02[OpacityChannel]; M03[CompositeChannels]+=M03[OpacityChannel]; M10[CompositeChannels]+=M10[OpacityChannel]; M11[CompositeChannels]+=M11[OpacityChannel]; M12[CompositeChannels]+=M12[OpacityChannel]; M20[CompositeChannels]+=M20[OpacityChannel]; M21[CompositeChannels]+=M21[OpacityChannel]; M22[CompositeChannels]+=M22[OpacityChannel]; M30[CompositeChannels]+=M30[OpacityChannel]; } if (image->colorspace == CMYKColorspace) { channels+=1; M00[CompositeChannels]+=M00[IndexChannel]; M01[CompositeChannels]+=M01[IndexChannel]; M02[CompositeChannels]+=M02[IndexChannel]; M03[CompositeChannels]+=M03[IndexChannel]; M10[CompositeChannels]+=M10[IndexChannel]; M11[CompositeChannels]+=M11[IndexChannel]; 
      M12[CompositeChannels]+=M12[IndexChannel];
      M20[CompositeChannels]+=M20[IndexChannel];
      M21[CompositeChannels]+=M21[IndexChannel];
      M22[CompositeChannels]+=M22[IndexChannel];
      M30[CompositeChannels]+=M30[IndexChannel];
    }
  M00[CompositeChannels]/=(double) channels;
  M01[CompositeChannels]/=(double) channels;
  M02[CompositeChannels]/=(double) channels;
  M03[CompositeChannels]/=(double) channels;
  M10[CompositeChannels]/=(double) channels;
  M11[CompositeChannels]/=(double) channels;
  M12[CompositeChannels]/=(double) channels;
  M20[CompositeChannels]/=(double) channels;
  M21[CompositeChannels]/=(double) channels;
  M22[CompositeChannels]/=(double) channels;
  M30[CompositeChannels]/=(double) channels;
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, &
      intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
      M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
    /*
      Quadrant correction: compare against MagickEpsilon rather than 0.0,
      since fabs() is never negative and a "< 0.0" test can never fire.
    */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) >= MagickEpsilon)
            {
              if ((M20[channel]-M02[channel]) < 0.0)
                channel_moments[channel].ellipse_angle+=90.0;
              else
                channel_moments[channel].ellipse_angle+=180.0;
            }
        }
      else
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y*
      channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
      channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.x)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Normalize image moments.
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
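      (I[0] through I[6] are the seven classic Hu invariants computed from
      the normalized central moments above; I[7] is the additional eighth
      invariant often quoted alongside them.)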
*/ channel_moments[channel].I[0]=M20[channel]+M02[channel]; channel_moments[channel].I[1]=(M20[channel]-M02[channel])* (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel]; channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])* (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])* (3.0*M21[channel]-M03[channel]); channel_moments[channel].I[3]=(M30[channel]+M12[channel])* (M30[channel]+M12[channel])+(M21[channel]+M03[channel])* (M21[channel]+M03[channel]); channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].I[5]=(M20[channel]-M02[channel])* ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])- (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+ 4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]); channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M03[channel]+M21[channel])* (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])* (M30[channel]+M12[channel])*(M03[channel]+M21[channel]); } if (y < (ssize_t) image->rows) channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments); return(channel_moments); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l P e r c e p t u a l H a s h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelPerceptualHash() returns the perceptual hash of one or more % image channels. % % The format of the GetImageChannelPerceptualHash method is: % % ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash( const Image *image,ExceptionInfo *exception) { ChannelMoments *moments; ChannelPerceptualHash *perceptual_hash; Image *hash_image; MagickBooleanType status; register ssize_t i; ssize_t channel; /* Blur then transform to sRGB colorspace. 
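      (The slight blur and the reduced depth make the hash tolerant of minor
      resampling and quantization differences before the moments are
      measured in each colorspace.)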
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    return((ChannelPerceptualHash *) NULL);
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,sRGBColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);  /* avoid leaking the clone */
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    return((ChannelPerceptualHash *) NULL);
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    CompositeChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    {
      moments=(ChannelMoments *) RelinquishMagickMemory(moments);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  /*
    Blur then transform to HCLp colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,HCLpColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);  /* avoid leaking the clone */
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  return(perceptual_hash);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l R a n g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelRange() returns the range of one or more image channels.
%
%  The format of the GetImageChannelRange method is:
%
%      MagickBooleanType GetImageChannelRange(const Image *image,
%        const ChannelType channel,double *minima,double *maxima,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o minima: the minimum value in the channel.
%
%    o maxima: the maximum value in the channel.
%
%    o exception: return any errors or warnings in this structure.
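%
%  A typical call (an illustrative sketch; error handling omitted):
%
%      double minima, maxima;
%
%      (void) GetImageChannelRange(image,CompositeChannels,&minima,&maxima,
%        exception);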
% */ MagickExport MagickBooleanType GetImageRange(const Image *image, double *minima,double *maxima,ExceptionInfo *exception) { return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception)); } MagickExport MagickBooleanType GetImageChannelRange(const Image *image, const ChannelType channel,double *minima,double *maxima, ExceptionInfo *exception) { MagickPixelPacket pixel; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *maxima=(-MagickMaximumValue); *minima=MagickMaximumValue; GetMagickPixelPacket(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((channel & RedChannel) != 0) { if (pixel.red < *minima) *minima=(double) pixel.red; if (pixel.red > *maxima) *maxima=(double) pixel.red; } if ((channel & GreenChannel) != 0) { if (pixel.green < *minima) *minima=(double) pixel.green; if (pixel.green > *maxima) *maxima=(double) pixel.green; } if ((channel & BlueChannel) != 0) { if (pixel.blue < *minima) *minima=(double) pixel.blue; if (pixel.blue > *maxima) *maxima=(double) pixel.blue; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { if ((QuantumRange-pixel.opacity) < *minima) *minima=(double) (QuantumRange-pixel.opacity); if ((QuantumRange-pixel.opacity) > *maxima) *maxima=(double) (QuantumRange-pixel.opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if ((double) pixel.index < *minima) *minima=(double) pixel.index; if ((double) pixel.index > *maxima) *maxima=(double) pixel.index; } p++; } } return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelStatistics() returns statistics for each channel in the % image. The statistics include the channel depth, its minima, maxima, mean, % standard deviation, kurtosis and skewness. You can access the red channel % mean, for example, like this: % % channel_statistics=GetImageChannelStatistics(image,exception); % red_mean=channel_statistics[RedChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageChannelStatistics method is: % % ChannelStatistics *GetImageChannelStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double area, standard_deviation; MagickPixelPacket number_bins, *histogram; QuantumAny range; register ssize_t i; size_t channels, depth, length; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); length=CompositeChannels+1UL; channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length, sizeof(*channel_statistics)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1U, sizeof(*histogram)); if ((channel_statistics == (ChannelStatistics *) NULL) || (histogram == (MagickPixelPacket *) NULL)) { if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (channel_statistics != (ChannelStatistics *) NULL) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } (void) memset(channel_statistics,0,length* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) CompositeChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-MagickMaximumValue); channel_statistics[i].minima=MagickMaximumValue; } (void) memset(histogram,0,(MaxMap+1U)*sizeof(*histogram)); (void) memset(&number_bins,0,sizeof(number_bins)); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; /* Compute pixel statistics. */ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; ) { if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[RedChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse) { channel_statistics[RedChannel].depth++; continue; } } if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[GreenChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse) { channel_statistics[GreenChannel].depth++; continue; } } if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlueChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse) { channel_statistics[BlueChannel].depth++; continue; } } if (image->matte != MagickFalse) { if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[OpacityChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelAlpha(p),range) == MagickFalse) { channel_statistics[OpacityChannel].depth++; continue; } } } if (image->colorspace == CMYKColorspace) { if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlackChannel].depth; range=GetQuantumRange(depth); if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse) { channel_statistics[BlackChannel].depth++; continue; } } } if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima) channel_statistics[RedChannel].minima=(double) GetPixelRed(p); if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima) channel_statistics[RedChannel].maxima=(double) GetPixelRed(p); 
channel_statistics[RedChannel].sum+=GetPixelRed(p); channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)* GetPixelRed(p); channel_statistics[RedChannel].sum_cubed+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p); channel_statistics[RedChannel].sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p); if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima) channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p); if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima) channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p); channel_statistics[GreenChannel].sum+=GetPixelGreen(p); channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)* GetPixelGreen(p); channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)* GetPixelGreen(p)*GetPixelGreen(p); channel_statistics[GreenChannel].sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p); if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima) channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p); if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima) channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p); channel_statistics[BlueChannel].sum+=GetPixelBlue(p); channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)* GetPixelBlue(p); channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)* GetPixelBlue(p)*GetPixelBlue(p); channel_statistics[BlueChannel].sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p); histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if (image->matte != MagickFalse) { if ((double) GetPixelAlpha(p) < channel_statistics[OpacityChannel].minima) channel_statistics[OpacityChannel].minima=(double) GetPixelAlpha(p); if ((double) GetPixelAlpha(p) > channel_statistics[OpacityChannel].maxima) channel_statistics[OpacityChannel].maxima=(double) GetPixelAlpha(p); channel_statistics[OpacityChannel].sum+=GetPixelAlpha(p); channel_statistics[OpacityChannel].sum_squared+=(double) GetPixelAlpha(p)*GetPixelAlpha(p); channel_statistics[OpacityChannel].sum_cubed+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p); channel_statistics[OpacityChannel].sum_fourth_power+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p); histogram[ScaleQuantumToMap(GetPixelAlpha(p))].opacity++; } if (image->colorspace == CMYKColorspace) { if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima) channel_statistics[BlackChannel].minima=(double) GetPixelIndex(indexes+x); if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima) channel_statistics[BlackChannel].maxima=(double) GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_squared+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_cubed+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_fourth_power+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x); histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; } x++; p++; } } for (i=0; i < (ssize_t) CompositeChannels; i++) { double area, mean, 
standard_deviation; /* Normalize pixel statistics. */ area=PerceptibleReciprocal((double) image->columns*image->rows); mean=channel_statistics[i].sum*area; channel_statistics[i].sum=mean; channel_statistics[i].sum_squared*=area; channel_statistics[i].sum_cubed*=area; channel_statistics[i].sum_fourth_power*=area; channel_statistics[i].mean=mean; channel_statistics[i].variance=channel_statistics[i].sum_squared; standard_deviation=sqrt(channel_statistics[i].variance-(mean*mean)); area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)* ((double) image->columns*image->rows); standard_deviation=sqrt(area*standard_deviation*standard_deviation); channel_statistics[i].standard_deviation=standard_deviation; } for (i=0; i < (ssize_t) (MaxMap+1U); i++) { if (histogram[i].red > 0.0) number_bins.red++; if (histogram[i].green > 0.0) number_bins.green++; if (histogram[i].blue > 0.0) number_bins.blue++; if ((image->matte != MagickFalse) && (histogram[i].opacity > 0.0)) number_bins.opacity++; if ((image->colorspace == CMYKColorspace) && (histogram[i].index > 0.0)) number_bins.index++; } area=PerceptibleReciprocal((double) image->columns*image->rows); for (i=0; i < (ssize_t) (MaxMap+1U); i++) { /* Compute pixel entropy. */ histogram[i].red*=area; channel_statistics[RedChannel].entropy+=-histogram[i].red* MagickLog10(histogram[i].red)* PerceptibleReciprocal(MagickLog10((double) number_bins.red)); histogram[i].green*=area; channel_statistics[GreenChannel].entropy+=-histogram[i].green* MagickLog10(histogram[i].green)* PerceptibleReciprocal(MagickLog10((double) number_bins.green)); histogram[i].blue*=area; channel_statistics[BlueChannel].entropy+=-histogram[i].blue* MagickLog10(histogram[i].blue)* PerceptibleReciprocal(MagickLog10((double) number_bins.blue)); if (image->matte != MagickFalse) { histogram[i].opacity*=area; channel_statistics[OpacityChannel].entropy+=-histogram[i].opacity* MagickLog10(histogram[i].opacity)* PerceptibleReciprocal(MagickLog10((double) number_bins.opacity)); } if (image->colorspace == CMYKColorspace) { histogram[i].index*=area; channel_statistics[IndexChannel].entropy+=-histogram[i].index* MagickLog10(histogram[i].index)* PerceptibleReciprocal(MagickLog10((double) number_bins.index)); } } /* Compute overall statistics. 
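      (The composite-channel entries below are built from the per-channel
      results: the three color channels, plus opacity when the image has a
      matte and black when the colorspace is CMYK.)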
*/ for (i=0; i < (ssize_t) CompositeChannels; i++) { channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double) channel_statistics[CompositeChannels].depth,(double) channel_statistics[i].depth); channel_statistics[CompositeChannels].minima=MagickMin( channel_statistics[CompositeChannels].minima, channel_statistics[i].minima); channel_statistics[CompositeChannels].maxima=EvaluateMax( channel_statistics[CompositeChannels].maxima, channel_statistics[i].maxima); channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum; channel_statistics[CompositeChannels].sum_squared+= channel_statistics[i].sum_squared; channel_statistics[CompositeChannels].sum_cubed+= channel_statistics[i].sum_cubed; channel_statistics[CompositeChannels].sum_fourth_power+= channel_statistics[i].sum_fourth_power; channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean; channel_statistics[CompositeChannels].variance+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; standard_deviation=sqrt(channel_statistics[i].variance- (channel_statistics[i].mean*channel_statistics[i].mean)); area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)* ((double) image->columns*image->rows); standard_deviation=sqrt(area*standard_deviation*standard_deviation); channel_statistics[CompositeChannels].standard_deviation=standard_deviation; channel_statistics[CompositeChannels].entropy+= channel_statistics[i].entropy; } channels=3; if (image->matte != MagickFalse) channels++; if (image->colorspace == CMYKColorspace) channels++; channel_statistics[CompositeChannels].sum/=channels; channel_statistics[CompositeChannels].sum_squared/=channels; channel_statistics[CompositeChannels].sum_cubed/=channels; channel_statistics[CompositeChannels].sum_fourth_power/=channels; channel_statistics[CompositeChannels].mean/=channels; channel_statistics[CompositeChannels].kurtosis/=channels; channel_statistics[CompositeChannels].skewness/=channels; channel_statistics[CompositeChannels].entropy/=channels; i=CompositeChannels; area=PerceptibleReciprocal((double) channels*image->columns*image->rows); channel_statistics[i].variance=channel_statistics[i].sum_squared; channel_statistics[i].mean=channel_statistics[i].sum; standard_deviation=sqrt(channel_statistics[i].variance- (channel_statistics[i].mean*channel_statistics[i].mean)); standard_deviation=sqrt(PerceptibleReciprocal((double) channels* image->columns*image->rows-1.0)*channels*image->columns*image->rows* standard_deviation*standard_deviation); channel_statistics[i].standard_deviation=standard_deviation; for (i=0; i <= (ssize_t) CompositeChannels; i++) { /* Compute kurtosis & skewness statistics. 
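      (With mean m and standard deviation s:
      skewness = (E[X^3]-3*m*E[X^2]+2*m^3)/s^3 and
      kurtosis = (E[X^4]-4*m*E[X^3]+6*m^2*E[X^2]-3*m^4)/s^4 - 3,
      where E[X^k] are the sum_* raw-moment averages accumulated above.)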
*/ standard_deviation=PerceptibleReciprocal( channel_statistics[i].standard_deviation); channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0* channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0* channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation*standard_deviation)-3.0; } channel_statistics[CompositeChannels].mean=0.0; channel_statistics[CompositeChannels].standard_deviation=0.0; for (i=0; i < (ssize_t) CompositeChannels; i++) { channel_statistics[CompositeChannels].mean+= channel_statistics[i].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[i].standard_deviation; } channel_statistics[CompositeChannels].mean/=(double) channels; channel_statistics[CompositeChannels].standard_deviation/=(double) channels; histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (y < (ssize_t) image->rows) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l y n o m i a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolynomialImage() returns a new image where each pixel is the sum of the % pixels in the image sequence after applying its corresponding terms % (coefficient and degree pairs). % % The format of the PolynomialImage method is: % % Image *PolynomialImage(const Image *images,const size_t number_terms, % const double *terms,ExceptionInfo *exception) % Image *PolynomialImageChannel(const Image *images, % const size_t number_terms,const ChannelType channel, % const double *terms,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o channel: the channel. % % o number_terms: the number of terms in the list. The actual list length % is 2 x number_terms + 1 (the constant). % % o terms: the list of polynomial coefficients and degree pairs and a % constant. % % o exception: return any errors or warnings in this structure. 
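%
%  For example (an illustrative sketch), terms={0.5,1.0,0.25,2.0} with
%  number_terms=2 computes 0.5*I1 + 0.25*I2^2 over a two-image sequence:
%
%      const double terms[] = { 0.5, 1.0, 0.25, 2.0 };
%
%      Image *polynomial=PolynomialImage(images,2,terms,exception);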
% */ MagickExport Image *PolynomialImage(const Image *images, const size_t number_terms,const double *terms,ExceptionInfo *exception) { Image *polynomial_image; polynomial_image=PolynomialImageChannel(images,DefaultChannels,number_terms, terms,exception); return(polynomial_image); } MagickExport Image *PolynomialImageChannel(const Image *images, const ChannelType channel,const size_t number_terms,const double *terms, ExceptionInfo *exception) { #define PolynomialImageTag "Polynomial/Image" CacheView *polynomial_view; Image *image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket **magick_restrict polynomial_pixels, zero; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); image=DestroyImage(image); return((Image *) NULL); } polynomial_pixels=AcquirePixelThreadSet(images); if (polynomial_pixels == (MagickPixelPacket **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Polynomial image pixels. */ status=MagickTrue; progress=0; GetMagickPixelPacket(images,&zero); polynomial_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *magick_restrict polynomial_indexes; register MagickPixelPacket *polynomial_pixel; register PixelPacket *magick_restrict q; register ssize_t i, x; size_t number_images; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view); polynomial_pixel=polynomial_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) polynomial_pixel[x]=zero; next=images; number_images=GetImageListLength(images); for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; if (i >= (ssize_t) number_terms) break; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { double coefficient, degree; coefficient=terms[i << 1]; degree=terms[(i << 1)+1]; if ((channel & RedChannel) != 0) polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree); if ((channel & GreenChannel) != 0) polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green, degree); if ((channel & BlueChannel) != 0) polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue, degree); if ((channel & OpacityChannel) != 0) polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale* (QuantumRange-p->opacity),degree); if 
(((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x], degree); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red)); SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green)); SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue)); if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange* polynomial_pixel[x].opacity)); else SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange* polynomial_pixel[x].opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange* polynomial_pixel[x].index)); q++; } if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(images,PolynomialImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } polynomial_view=DestroyCacheView(polynomial_view); polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t a t i s t i c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StatisticImage() makes each pixel the min / max / median / mode / etc. of % the neighborhood of the specified width and height. % % The format of the StatisticImage method is: % % Image *StatisticImage(const Image *image,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % Image *StatisticImageChannel(const Image *image, % const ChannelType channel,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the image channel. % % o type: the statistic type (median, mode, etc.). % % o width: the width of the pixel neighborhood. % % o height: the height of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. 
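%
%  For example (an illustrative sketch; error handling omitted), a 3x3
%  median filter:
%
%      Image *median=StatisticImage(image,MedianStatistic,3,3,exception);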
% */ #define ListChannels 5 typedef struct _ListNode { size_t next[9], count, signature; } ListNode; typedef struct _SkipList { ssize_t level; ListNode *nodes; } SkipList; typedef struct _PixelList { size_t length, seed, signature; SkipList lists[ListChannels]; } PixelList; static PixelList *DestroyPixelList(PixelList *pixel_list) { register ssize_t i; if (pixel_list == (PixelList *) NULL) return((PixelList *) NULL); for (i=0; i < ListChannels; i++) if (pixel_list->lists[i].nodes != (ListNode *) NULL) pixel_list->lists[i].nodes=(ListNode *) RelinquishAlignedMemory( pixel_list->lists[i].nodes); pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list) { register ssize_t i; assert(pixel_list != (PixelList **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixel_list[i] != (PixelList *) NULL) pixel_list[i]=DestroyPixelList(pixel_list[i]); pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList *AcquirePixelList(const size_t width,const size_t height) { PixelList *pixel_list; register ssize_t i; pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list)); if (pixel_list == (PixelList *) NULL) return(pixel_list); (void) memset((void *) pixel_list,0,sizeof(*pixel_list)); pixel_list->length=width*height; for (i=0; i < ListChannels; i++) { pixel_list->lists[i].nodes=(ListNode *) AcquireAlignedMemory(65537UL, sizeof(*pixel_list->lists[i].nodes)); if (pixel_list->lists[i].nodes == (ListNode *) NULL) return(DestroyPixelList(pixel_list)); (void) memset(pixel_list->lists[i].nodes,0,65537UL* sizeof(*pixel_list->lists[i].nodes)); } pixel_list->signature=MagickCoreSignature; return(pixel_list); } static PixelList **AcquirePixelListThreadSet(const size_t width, const size_t height) { PixelList **pixel_list; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixel_list=(PixelList **) AcquireQuantumMemory(number_threads, sizeof(*pixel_list)); if (pixel_list == (PixelList **) NULL) return((PixelList **) NULL); (void) memset(pixel_list,0,number_threads*sizeof(*pixel_list)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_list[i]=AcquirePixelList(width,height); if (pixel_list[i] == (PixelList *) NULL) return(DestroyPixelListThreadSet(pixel_list)); } return(pixel_list); } static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel, const size_t color) { register SkipList *list; register ssize_t level; size_t search, update[9]; /* Initialize the node. */ list=pixel_list->lists+channel; list->nodes[color].signature=pixel_list->signature; list->nodes[color].count=1; /* Determine where it belongs in the list. */ search=65536UL; for (level=list->level; level >= 0; level--) { while (list->nodes[search].next[level] < color) search=list->nodes[search].next[level]; update[level]=search; } /* Generate a pseudo-random level for this node. */ for (level=0; ; level++) { pixel_list->seed=(pixel_list->seed*42893621L)+1L; if ((pixel_list->seed & 0x300) != 0x300) break; } if (level > 8) level=8; if (level > (list->level+2)) level=list->level+2; /* If we're raising the list's level, link back to the root node. */ while (level > list->level) { list->level++; update[list->level]=65536UL; } /* Link the node into the skip-list. 
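    (Color values 0..65535 double as node indices; node 65536 is the
    sentinel root.  The node is spliced in at every level up to the
    randomly chosen one recorded in update[], keeping searches
    logarithmic on average.)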
*/ do { list->nodes[color].next[level]=list->nodes[update[level]].next[level]; list->nodes[update[level]].next[level]=color; } while (level-- > 0); } static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, maximum; ssize_t count; unsigned short channels[ListChannels]; /* Find the maximum value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; maximum=list->nodes[color].next[0]; do { color=list->nodes[color].next[0]; if (color > maximum) maximum=color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) maximum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { MagickRealType sum; register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the mean value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; sum=0.0; do { color=list->nodes[color].next[0]; sum+=(MagickRealType) list->nodes[color].count*color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; channels[channel]=(unsigned short) sum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the median value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; do { color=list->nodes[color].next[0]; count+=list->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); channels[channel]=(unsigned short) color; } GetMagickPixelPacket((const Image *) NULL,pixel); pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, minimum; ssize_t count; unsigned short channels[ListChannels]; /* Find the minimum value for each of the color. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; count=0; color=65536UL; minimum=list->nodes[color].next[0]; do { color=list->nodes[color].next[0]; if (color < minimum) minimum=color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) minimum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, max_count, mode; ssize_t count; unsigned short channels[5]; /* Make each pixel the 'predominant color' of the specified neighborhood. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; mode=color; max_count=list->nodes[mode].count; count=0; do { color=list->nodes[color].next[0]; if (list->nodes[color].count > max_count) { mode=color; max_count=list->nodes[mode].count; } count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) mode; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, next, previous; ssize_t count; unsigned short channels[5]; /* Finds the non peak value for each of the colors. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; next=list->nodes[color].next[0]; count=0; do { previous=color; color=next; next=list->nodes[color].next[0]; count+=list->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); if ((previous == 65536UL) && (next != 65536UL)) color=next; else if ((previous != 65536UL) && (next == 65536UL)) color=previous; channels[channel]=(unsigned short) color; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetRootMeanSquarePixelList(PixelList *pixel_list, MagickPixelPacket *pixel) { MagickRealType sum; register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the root mean square value for each of the color. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; sum=0.0; do { color=list->nodes[color].next[0]; sum+=(MagickRealType) (list->nodes[color].count*color*color); count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; channels[channel]=(unsigned short) sqrt(sum); } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetStandardDeviationPixelList(PixelList *pixel_list, MagickPixelPacket *pixel) { MagickRealType sum, sum_squared; register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the standard-deviation value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; sum=0.0; sum_squared=0.0; do { register ssize_t i; color=list->nodes[color].next[0]; sum+=(MagickRealType) list->nodes[color].count*color; for (i=0; i < (ssize_t) list->nodes[color].count; i++) sum_squared+=((MagickRealType) color)*((MagickRealType) color); count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; sum_squared/=pixel_list->length; channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum)); } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static inline void InsertPixelList(const Image *image,const PixelPacket *pixel, const IndexPacket *indexes,PixelList *pixel_list) { size_t signature; unsigned short index; index=ScaleQuantumToShort(GetPixelRed(pixel)); signature=pixel_list->lists[0].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[0].nodes[index].count++; else AddNodePixelList(pixel_list,0,index); index=ScaleQuantumToShort(GetPixelGreen(pixel)); signature=pixel_list->lists[1].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[1].nodes[index].count++; else AddNodePixelList(pixel_list,1,index); index=ScaleQuantumToShort(GetPixelBlue(pixel)); signature=pixel_list->lists[2].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[2].nodes[index].count++; else AddNodePixelList(pixel_list,2,index); index=ScaleQuantumToShort(GetPixelOpacity(pixel)); signature=pixel_list->lists[3].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[3].nodes[index].count++; else AddNodePixelList(pixel_list,3,index); if (image->colorspace == CMYKColorspace) index=ScaleQuantumToShort(GetPixelIndex(indexes)); signature=pixel_list->lists[4].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[4].nodes[index].count++; else AddNodePixelList(pixel_list,4,index); } static void ResetPixelList(PixelList *pixel_list) { int level; register ListNode *root; register SkipList *list; register ssize_t channel; /* Reset the skip-list. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; root=list->nodes+65536UL; list->level=0; for (level=0; level < 9; level++) root->next[level]=65536UL; } pixel_list->seed=pixel_list->signature++; } MagickExport Image *StatisticImage(const Image *image,const StatisticType type, const size_t width,const size_t height,ExceptionInfo *exception) { Image *statistic_image; statistic_image=StatisticImageChannel(image,DefaultChannels,type,width, height,exception); return(statistic_image); } MagickExport Image *StatisticImageChannel(const Image *image, const ChannelType channel,const StatisticType type,const size_t width, const size_t height,ExceptionInfo *exception) { #define StatisticImageTag "Statistic/Image" CacheView *image_view, *statistic_view; Image *statistic_image; MagickBooleanType status; MagickOffsetType progress; PixelList **magick_restrict pixel_list; size_t neighbor_height, neighbor_width; ssize_t y; /* Initialize statistics image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); statistic_image=CloneImage(image,0,0,MagickTrue,exception); if (statistic_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse) { InheritException(exception,&statistic_image->exception); statistic_image=DestroyImage(statistic_image); return((Image *) NULL); } neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width; neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) : height; pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height); if (pixel_list == (PixelList **) NULL) { statistic_image=DestroyImage(statistic_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Make each pixel the min / max / median / mode / etc. of the neighborhood. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); statistic_view=AcquireAuthenticCacheView(statistic_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,statistic_image,statistic_image->rows,1) #endif for (y=0; y < (ssize_t) statistic_image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict statistic_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y- (ssize_t) (neighbor_height/2L),image->columns+neighbor_width, neighbor_height,exception); q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view); for (x=0; x < (ssize_t) statistic_image->columns; x++) { MagickPixelPacket pixel; register const IndexPacket *magick_restrict s; register const PixelPacket *magick_restrict r; register ssize_t u, v; r=p; s=indexes+x; ResetPixelList(pixel_list[id]); for (v=0; v < (ssize_t) neighbor_height; v++) { for (u=0; u < (ssize_t) neighbor_width; u++) InsertPixelList(image,r+u,s+u,pixel_list[id]); r+=image->columns+neighbor_width; s+=image->columns+neighbor_width; } GetMagickPixelPacket(image,&pixel); SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+ neighbor_width*neighbor_height/2,&pixel); switch (type) { case GradientStatistic: { MagickPixelPacket maximum, minimum; GetMinimumPixelList(pixel_list[id],&pixel); minimum=pixel; GetMaximumPixelList(pixel_list[id],&pixel); maximum=pixel; pixel.red=MagickAbsoluteValue(maximum.red-minimum.red); pixel.green=MagickAbsoluteValue(maximum.green-minimum.green); pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue); pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity); if (image->colorspace == CMYKColorspace) pixel.index=MagickAbsoluteValue(maximum.index-minimum.index); break; } case MaximumStatistic: { GetMaximumPixelList(pixel_list[id],&pixel); break; } case MeanStatistic: { GetMeanPixelList(pixel_list[id],&pixel); break; } case MedianStatistic: default: { GetMedianPixelList(pixel_list[id],&pixel); break; } case MinimumStatistic: { GetMinimumPixelList(pixel_list[id],&pixel); break; } case ModeStatistic: { GetModePixelList(pixel_list[id],&pixel); break; } case NonpeakStatistic: { GetNonpeakPixelList(pixel_list[id],&pixel); break; } case RootMeanSquareStatistic: { GetRootMeanSquarePixelList(pixel_list[id],&pixel); break; } case StandardDeviationStatistic: { GetStandardDeviationPixelList(pixel_list[id],&pixel); break; } } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index)); p++; q++; } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if 
(image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,StatisticImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
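/* Illustrative usage (not part of statistic.c above): a minimal caller for
 * the public StatisticImage() entry point documented earlier. Everything
 * beyond the StatisticImage() call itself -- the MagickCore setup calls, the
 * header path, and the "input.png"/"median.png" filenames -- is an assumption
 * about a typical ImageMagick-6 installation, not taken from this source. */
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo *exception;
  ImageInfo *info;
  Image *image, *median;

  MagickCoreGenesis("statistic_demo", MagickFalse);
  exception = AcquireExceptionInfo();
  info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename, "input.png", MaxTextExtent);
  image = ReadImage(info, exception);
  if (image != (Image *) NULL)
    {
      /* 3x3 median filter: each output pixel becomes the median of its
         neighborhood, computed with the skip-list histograms above. */
      median = StatisticImage(image, MedianStatistic, 3, 3, exception);
      if (median != (Image *) NULL)
        {
          (void) CopyMagickString(median->filename, "median.png",
            MaxTextExtent);
          (void) WriteImage(info, median);
          median = DestroyImage(median);
        }
      image = DestroyImage(image);
    }
  info = DestroyImageInfo(info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}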
TaNra.h
// Copyright 2015 Christina Teflioudi // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * File: TANRA_all.h * Author: chteflio * * Created on August 24, 2015, 9:53 AM */ #ifndef TANRA_ALL_H #define TANRA_ALL_H namespace mips { class TaNra : public Mip { ProbeBucket probeBucket, probeBucketK; LempArguments args; std::vector<RetrievalArguments> retrArg; inline void printAlgoName(const VectorMatrix& leftMatrix) { if (args.isTARR) { logging << "TA_NRA (RR)" << "\t" << args.threads << "\t"; std::cout << "[ALGORITHM] TA_NRA (ROUND ROBIN) with " << args.threads << " thread(s)" << std::endl; } else { logging << "TA_NRA (MAX)" << "\t" << args.threads << "\t"; std::cout << "[ALGORITHM] TA_NRA (MAX HEAP) with " << args.threads << " thread(s)" << std::endl; } logging << "P(" << probeMatrix.rowNum << "x" << (0 + probeMatrix.colNum) << ")\t"; logging << "Q^T(" << leftMatrix.rowNum << "x" << (0 + leftMatrix.colNum) << ")\t"; } inline void initializeInternal(std::vector<VectorMatrix>& queryMatrices, VectorMatrix& leftMatrix) { std::cout << "[RETRIEVAL] QueryMatrix contains " << leftMatrix.rowNum << " vectors with dimensionality " << (0 + leftMatrix.colNum) << std::endl; row_type myNumThreads = args.threads; if (leftMatrix.rowNum < args.threads) { myNumThreads = leftMatrix.rowNum; std::cout << "[WARNING] Query matrix contains too few elements. Suboptimal running with " << myNumThreads << " thread(s)" << std::endl; } omp_set_num_threads(myNumThreads); queryMatrices.resize(myNumThreads); splitMatrices(leftMatrix, queryMatrices); for (row_type i = 0; i < myNumThreads; i++) { retrArg[i].initializeBasics(queryMatrices[i], probeMatrix, LEMP_TANRA, args.theta, args.k, args.threads, args.R, args.epsilon, 0, 0, false, args.isTARR); retrArg[i].init(probeMatrix.rowNum); retrArg[i].clear(); } } public: inline void setTheta(double theta) { args.theta = theta; } inline TaNra(InputArguments& input, bool isTARR) { args.copyInputArguments(input); args.isTARR = isTARR; // now do the logging logging.open(args.logFile.c_str(), std::ios_base::app); if (!logging.is_open()) { std::cout << "[WARNING] No log will be created!" 
<< std::endl; } else { std::cout << "[INFO] Logging in " << args.logFile << std::endl; } omp_set_num_threads(args.threads); retrArg.resize(args.threads); }; inline void initialize(VectorMatrix& rightMatrix) { std::cout << "[INIT] ProbeMatrix contains " << rightMatrix.rowNum << " vectors with dimensionality " << (0 + rightMatrix.colNum) << std::endl; probeMatrix = rightMatrix; timer.start(); probeBucket.init(probeMatrix, 0, probeMatrix.rowNum, args); // initialize probeBucket.bucketScanThreshold = 0; probeBucket.normL2.second = std::numeric_limits<double>::max(); retriever_ptr rPtr(new tanraRetriever()); probeBucket.ptrRetriever = rPtr; if (probeBucket.ptrIndexes[SL] == 0) probeBucket.ptrIndexes[SL] = new QueueElementLists(); static_cast<QueueElementLists*> (probeBucket.ptrIndexes[SL])->initializeLists(probeMatrix, 0, probeMatrix.rowNum); timer.stop(); dataPreprocessingTimeRight += timer.elapsedTime().nanos(); } inline ~TaNra() { logging.close(); } inline void runAboveTheta(VectorMatrix& leftMatrix, Results& results) { printAlgoName(leftMatrix); std::vector<VectorMatrix> queryMatrices; initializeInternal(queryMatrices, leftMatrix); results.resultsVector.resize(args.threads); std::vector<row_type> blockOffsets; blockOffsets.push_back(0); std::cout << "[RETRIEVAL] Retrieval (theta = " << args.theta << ") starts ..." << std::endl; logging << "theta(" << args.theta << ")\t"; timer.start(); comp_type comparisons = 0; #pragma omp parallel reduction(+ : comparisons) { row_type tid = omp_get_thread_num(); bucketize(retrArg[tid].queryBatches, queryMatrices[tid], blockOffsets, args); probeBucket.ptrRetriever->run(probeBucket, &retrArg[tid]); results.moveAppend(retrArg[tid].results, tid); comparisons += retrArg[tid].comparisons; } timer.stop(); retrievalTime += timer.elapsedTime().nanos(); totalComparisons += comparisons; std::cout << "[RETRIEVAL] ... and is finished with " << results.getResultSize() << " results" << std::endl; logging << results.getResultSize() << "\t"; outputStats(); } }; } #endif /* TANRA_ALL_H */
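// Illustrative usage (not part of TaNra.h above): a hypothetical driver for
// the TaNra retriever. TaNra, setTheta(), initialize(), and runAboveTheta()
// are taken from the header; the InputArguments field names (threads,
// logFile), the matrix-loading step, and default-constructibility of
// VectorMatrix and Results are assumptions about the surrounding LEMP code.
#include "TaNra.h"

int main() {
    mips::InputArguments input;        // assumed to carry threads/logFile
    input.threads = 4;
    input.logFile = "tanra.log";       // consumed by the TaNra constructor

    mips::VectorMatrix probe, queries; // assume these are loaded elsewhere

    mips::TaNra tanra(input, /*isTARR=*/true); // round-robin TA-NRA variant
    tanra.setTheta(0.9);               // threshold for above-theta retrieval
    tanra.initialize(probe);           // sorts probe entries into QueueElementLists
    mips::Results results;
    tanra.runAboveTheta(queries, results);
    return 0;
}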
scramble_edges.c
/* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #include "splittable_mrg.h" #include "graph_generator.h" #include "permutation_gen.h" #include "apply_permutation_mpi.h" #include "scramble_edges.h" #include "utils.h" #include <stdint.h> #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #ifdef __MTA__ #include <sys/mta_task.h> #endif #ifdef GRAPH_GENERATOR_MPI #include <mpi.h> #endif #ifdef GRAPH_GENERATOR_OMP #include <omp.h> #endif /* This version is for sequential machines, OpenMP, and the XMT. */ void scramble_edges_shared(uint64_t userseed1, uint64_t userseed2, int64_t nedges, int64_t* result /* Input and output array of edges (size = 2 * nedges) */) { mrg_state st; uint_fast32_t seed[5]; int64_t* new_result; int64_t i; int64_t* perm = (int64_t*)xmalloc(nedges * sizeof(int64_t)); make_mrg_seed(userseed1, userseed2, seed); mrg_seed(&st, seed); mrg_skip(&st, 5, 0, 0); /* To make offset different from other PRNG uses */ rand_sort_shared(&st, nedges, perm); new_result = (int64_t*)xmalloc(nedges * 2 * sizeof(int64_t)); #ifdef __MTA__ #pragma mta assert parallel #pragma mta block schedule #endif #ifdef GRAPH_GENERATOR_OMP #pragma omp parallel for #endif for (i = 0; i < nedges; ++i) { int64_t p = perm[i]; new_result[i * 2 + 0] = result[p * 2 + 0]; new_result[i * 2 + 1] = result[p * 2 + 1]; } free(perm); memcpy(result, new_result, nedges * 2 * sizeof(int64_t)); free(new_result); } #ifdef GRAPH_GENERATOR_MPI /* For MPI distributed memory. */ void scramble_edges_mpi(MPI_Comm comm, const uint64_t userseed1, const uint64_t userseed2, const int64_t local_nedges_in, const int64_t* const local_edges_in, int64_t* const local_nedges_out_ptr, int64_t** const local_edges_out_ptr /* Allocated using xmalloc() by scramble_edges_mpi */) { int rank, size; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &size); mrg_state st; uint_fast32_t seed[5]; make_mrg_seed(userseed1, userseed2, seed); mrg_seed(&st, seed); mrg_skip(&st, 5, 0, 0); /* To make offset different from other PRNG uses */ int64_t total_nedges; MPI_Allreduce((void*)&local_nedges_in, &total_nedges, 1, INT64_T_MPI_TYPE, MPI_SUM, comm); int64_t local_nedges_out; /* = local permutation size */ int64_t* local_perm; rand_sort_mpi(comm, &st, total_nedges, &local_nedges_out, &local_perm); *local_nedges_out_ptr = local_nedges_out; /* Gather permutation information and fast owner lookup cache (code in * apply_permutation_mpi.c). */ int64_t* edge_displs = (int64_t*)xmalloc((size + 1) * sizeof(int64_t)); int* edge_owner_table; int64_t* edge_owner_cutoff; int lg_minedgecount; int64_t maxedgecount; gather_block_distribution_info(comm, local_nedges_in, total_nedges, edge_displs, &edge_owner_table, &edge_owner_cutoff, &lg_minedgecount, &maxedgecount); /* Originally from apply_permutation_mpi.c */ #define LOOKUP_EDGE_OWNER(v) \ (edge_owner_table[(v) >> lg_minedgecount] + \ ((v) >= edge_owner_cutoff[(v) >> lg_minedgecount])) /* Apply permutation. Output distribution is same as distribution of * generated edge permutation. */ /* Count number of requests to send to each destination. 
*/ int* send_counts = (int*)xcalloc(size, sizeof(int)); /* Uses zero-init */ int64_t i; for (i = 0; i < local_nedges_out; ++i) { ++send_counts[LOOKUP_EDGE_OWNER(local_perm[i])]; } /* Prefix sum to get displacements. */ int* send_displs = (int*)xmalloc((size + 1) * sizeof(int)); send_displs[0] = 0; for (i = 0; i < size; ++i) { send_displs[i + 1] = send_displs[i] + send_counts[i]; } assert (send_displs[size] == local_nedges_out); /* Put edges into buffer by destination; also keep around index values for * where to write the result. */ int64_t* sendbuf = (int64_t*)xmalloc(local_nedges_out * sizeof(int64_t)); int64_t* reply_loc_buf = (int64_t*)xmalloc(local_nedges_out * sizeof(int64_t)); int* send_offsets = (int*)xmalloc((size + 1) * sizeof(int)); memcpy(send_offsets, send_displs, (size + 1) * sizeof(int)); for (i = 0; i < local_nedges_out; ++i) { int write_index = send_offsets[LOOKUP_EDGE_OWNER(local_perm[i])]; sendbuf[write_index] = local_perm[i]; reply_loc_buf[write_index] = i; ++send_offsets[LOOKUP_EDGE_OWNER(local_perm[i])]; } for (i = 0; i < size; ++i) assert (send_offsets[i] == send_displs[i + 1]); free(send_offsets); send_offsets = NULL; free(local_perm); local_perm = NULL; #undef LOOKUP_EDGE_OWNER free(edge_owner_table); edge_owner_table = NULL; free(edge_owner_cutoff); edge_owner_cutoff = NULL; /* Find out how many requests I will be receiving. */ int* recv_counts = (int*)xmalloc(size * sizeof(int)); MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, comm); /* Compute their displacements. */ int* recv_displs = (int*)xmalloc((size + 1) * sizeof(int)); recv_displs[0] = 0; for (i = 0; i < size; ++i) { recv_displs[i + 1] = recv_displs[i] + recv_counts[i]; } /* Make receive and reply buffers. */ int64_t* recvbuf = (int64_t*)xmalloc(recv_displs[size] * sizeof(int64_t)); int64_t* replybuf = (int64_t*)xmalloc(recv_displs[size] * 2 * sizeof(int64_t)); /* Move requests for edges into receive buffer. */ MPI_Alltoallv(sendbuf, send_counts, send_displs, INT64_T_MPI_TYPE, recvbuf, recv_counts, recv_displs, INT64_T_MPI_TYPE, comm); free(sendbuf); sendbuf = NULL; /* Put requested edges into response buffer. */ int64_t my_edge_offset = edge_displs[rank]; for (i = 0; i < recv_displs[size]; ++i) { replybuf[i * 2 + 0] = local_edges_in[(recvbuf[i] - my_edge_offset) * 2 + 0]; replybuf[i * 2 + 1] = local_edges_in[(recvbuf[i] - my_edge_offset) * 2 + 1]; } free(recvbuf); recvbuf = NULL; free(edge_displs); edge_displs = NULL; /* Send replies back. */ int64_t* reply_edges = (int64_t*)xmalloc(local_nedges_out * 2 * sizeof(int64_t)); for (i = 0; i < size; ++i) { /* Sending back two values for each request */ recv_counts[i] *= 2; recv_displs[i] *= 2; send_counts[i] *= 2; send_displs[i] *= 2; } MPI_Alltoallv(replybuf, recv_counts, recv_displs, INT64_T_MPI_TYPE, reply_edges, send_counts, send_displs, INT64_T_MPI_TYPE, comm); free(replybuf); replybuf = NULL; free(recv_counts); recv_counts = NULL; free(recv_displs); recv_displs = NULL; free(send_counts); send_counts = NULL; free(send_displs); send_displs = NULL; /* Make output array of edges. */ int64_t* local_edges_out = (int64_t*)xmalloc(local_nedges_out * 2 * sizeof(int64_t)); *local_edges_out_ptr = local_edges_out; /* Put edges into output array. */ for (i = 0; i < local_nedges_out; ++i) { local_edges_out[reply_loc_buf[i] * 2 + 0] = reply_edges[2 * i + 0]; local_edges_out[reply_loc_buf[i] * 2 + 1] = reply_edges[2 * i + 1]; } free(reply_loc_buf); reply_loc_buf = NULL; free(reply_edges); reply_edges = NULL; } #endif
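/* Illustrative usage (not part of scramble_edges.c above): calling the
 * shared-memory entry point on a toy edge list. Assumes scramble_edges.h
 * declares scramble_edges_shared() (as the #include above suggests) and that
 * the generator's PRNG and xmalloc sources are linked in; the seeds and edge
 * values are arbitrary. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include "scramble_edges.h"

int main(void)
{
  int64_t nedges = 4;
  /* Edges are stored as (v0,v1) pairs in a flat array of 2*nedges values. */
  int64_t edges[8] = {0,1, 1,2, 2,3, 3,0};
  int64_t i;

  /* Reorders the edge pairs in place; the permutation is a deterministic
     function of the two user seeds, via the MRG skip logic shown above. */
  scramble_edges_shared(UINT64_C(1), UINT64_C(2), nedges, edges);

  for (i = 0; i < nedges; ++i)
    printf("%" PRId64 " %" PRId64 "\n", edges[2*i], edges[2*i+1]);
  return 0;
}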
hope.c
typedef signed char __int8_t; typedef unsigned char __uint8_t; typedef short __int16_t; typedef unsigned short __uint16_t; typedef int __int32_t; typedef unsigned int __uint32_t; typedef long long __int64_t; typedef unsigned long long __uint64_t; typedef long __darwin_intptr_t; typedef unsigned int __darwin_natural_t; typedef int __darwin_ct_rune_t; union stUn_imopVarPre0 { char __mbstate8[128]; long long _mbstateL; } ; typedef union stUn_imopVarPre0 __mbstate_t; typedef __mbstate_t __darwin_mbstate_t; typedef long int __darwin_ptrdiff_t; typedef long unsigned int __darwin_size_t; typedef __builtin_va_list __darwin_va_list; typedef int __darwin_wchar_t; typedef __darwin_wchar_t __darwin_rune_t; typedef int __darwin_wint_t; typedef unsigned long __darwin_clock_t; typedef __uint32_t __darwin_socklen_t; typedef long __darwin_ssize_t; typedef long __darwin_time_t; typedef __int64_t __darwin_blkcnt_t; typedef __int32_t __darwin_blksize_t; typedef __int32_t __darwin_dev_t; typedef unsigned int __darwin_fsblkcnt_t; typedef unsigned int __darwin_fsfilcnt_t; typedef __uint32_t __darwin_gid_t; typedef __uint32_t __darwin_id_t; typedef __uint64_t __darwin_ino64_t; typedef __darwin_ino64_t __darwin_ino_t; typedef __darwin_natural_t __darwin_mach_port_name_t; typedef __darwin_mach_port_name_t __darwin_mach_port_t; typedef __uint16_t __darwin_mode_t; typedef __int64_t __darwin_off_t; typedef __int32_t __darwin_pid_t; typedef __uint32_t __darwin_sigset_t; typedef __int32_t __darwin_suseconds_t; typedef __uint32_t __darwin_uid_t; typedef __uint32_t __darwin_useconds_t; typedef unsigned char __darwin_uuid_t[16]; typedef char __darwin_uuid_string_t[37]; struct __darwin_pthread_handler_rec { void ( *__routine )(void *); void *__arg; struct __darwin_pthread_handler_rec *__next; } ; struct _opaque_pthread_attr_t { long __sig; char __opaque[56]; } ; struct _opaque_pthread_cond_t { long __sig; char __opaque[40]; } ; struct _opaque_pthread_condattr_t { long __sig; char __opaque[8]; } ; struct _opaque_pthread_mutex_t { long __sig; char __opaque[56]; } ; struct _opaque_pthread_mutexattr_t { long __sig; char __opaque[8]; } ; struct _opaque_pthread_once_t { long __sig; char __opaque[8]; } ; struct _opaque_pthread_rwlock_t { long __sig; char __opaque[192]; } ; struct _opaque_pthread_rwlockattr_t { long __sig; char __opaque[16]; } ; struct _opaque_pthread_t { long __sig; struct __darwin_pthread_handler_rec *__cleanup_stack; char __opaque[8176]; } ; typedef struct _opaque_pthread_attr_t __darwin_pthread_attr_t; typedef struct _opaque_pthread_cond_t __darwin_pthread_cond_t; typedef struct _opaque_pthread_condattr_t __darwin_pthread_condattr_t; typedef unsigned long __darwin_pthread_key_t; typedef struct _opaque_pthread_mutex_t __darwin_pthread_mutex_t; typedef struct _opaque_pthread_mutexattr_t __darwin_pthread_mutexattr_t; typedef struct _opaque_pthread_once_t __darwin_pthread_once_t; typedef struct _opaque_pthread_rwlock_t __darwin_pthread_rwlock_t; typedef struct _opaque_pthread_rwlockattr_t __darwin_pthread_rwlockattr_t; typedef struct _opaque_pthread_t *__darwin_pthread_t; typedef int __darwin_nl_item; typedef int __darwin_wctrans_t; typedef __uint32_t __darwin_wctype_t; typedef __darwin_va_list va_list; typedef __darwin_size_t size_t; typedef __darwin_off_t fpos_t; struct __sbuf { unsigned char *_base; int _size; } ; struct __sFILEX ; struct __sFILE { unsigned char *_p; int _r; int _w; short _flags; short _file; struct __sbuf _bf; int _lbfsize; void *_cookie; int ( *_close )(void *); int ( *_read )(void *, char * , 
int ); fpos_t ( *_seek )(void *, fpos_t , int ); int ( *_write )(void *, const char * , int ); struct __sbuf _ub; struct __sFILEX *_extra; int _ur; unsigned char _ubuf[3]; unsigned char _nbuf[1]; struct __sbuf _lb; int _blksize; fpos_t _offset; } ; typedef struct __sFILE FILE; int fclose(FILE *); int fgetc(FILE *); FILE *fopen(const char *restrict __filename, const char *restrict __mode); int fscanf(FILE *restrict , const char *restrict , ...); int printf(const char *restrict , ...); typedef __darwin_off_t off_t; typedef __darwin_ssize_t ssize_t; enum enum_imopVarPre1 { P_ALL, P_PID , P_PGID } ; typedef enum enum_imopVarPre1 idtype_t; typedef __darwin_pid_t pid_t; typedef __darwin_id_t id_t; typedef int sig_atomic_t; struct __darwin_i386_thread_state { unsigned int __eax; unsigned int __ebx; unsigned int __ecx; unsigned int __edx; unsigned int __edi; unsigned int __esi; unsigned int __ebp; unsigned int __esp; unsigned int __ss; unsigned int __eflags; unsigned int __eip; unsigned int __cs; unsigned int __ds; unsigned int __es; unsigned int __fs; unsigned int __gs; } ; struct __darwin_fp_control { unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , :2 , __pc: 2 , __rc: 2 , :1 , :3; } ; typedef struct __darwin_fp_control __darwin_fp_control_t; struct __darwin_fp_status { unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , __stkflt: 1 , __errsumm: 1 , __c0: 1 , __c1: 1 , __c2: 1 , __tos: 3 , __c3: 1 , __busy: 1; } ; typedef struct __darwin_fp_status __darwin_fp_status_t; struct __darwin_mmst_reg { char __mmst_reg[10]; char __mmst_rsrv[6]; } ; struct __darwin_xmm_reg { char __xmm_reg[16]; } ; struct __darwin_i386_float_state { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; char __fpu_rsrv4[14 * 16]; int __fpu_reserved1; } ; struct __darwin_i386_avx_state { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg 
__fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; char __fpu_rsrv4[14 * 16]; int __fpu_reserved1; char __avx_reserved1[64]; struct __darwin_xmm_reg __fpu_ymmh0; struct __darwin_xmm_reg __fpu_ymmh1; struct __darwin_xmm_reg __fpu_ymmh2; struct __darwin_xmm_reg __fpu_ymmh3; struct __darwin_xmm_reg __fpu_ymmh4; struct __darwin_xmm_reg __fpu_ymmh5; struct __darwin_xmm_reg __fpu_ymmh6; struct __darwin_xmm_reg __fpu_ymmh7; } ; struct __darwin_i386_exception_state { __uint16_t __trapno; __uint16_t __cpu; __uint32_t __err; __uint32_t __faultvaddr; } ; struct __darwin_x86_debug_state32 { unsigned int __dr0; unsigned int __dr1; unsigned int __dr2; unsigned int __dr3; unsigned int __dr4; unsigned int __dr5; unsigned int __dr6; unsigned int __dr7; } ; struct __darwin_x86_thread_state64 { __uint64_t __rax; __uint64_t __rbx; __uint64_t __rcx; __uint64_t __rdx; __uint64_t __rdi; __uint64_t __rsi; __uint64_t __rbp; __uint64_t __rsp; __uint64_t __r8; __uint64_t __r9; __uint64_t __r10; __uint64_t __r11; __uint64_t __r12; __uint64_t __r13; __uint64_t __r14; __uint64_t __r15; __uint64_t __rip; __uint64_t __rflags; __uint64_t __cs; __uint64_t __fs; __uint64_t __gs; } ; struct __darwin_x86_float_state64 { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; struct __darwin_xmm_reg __fpu_xmm8; struct __darwin_xmm_reg __fpu_xmm9; struct __darwin_xmm_reg __fpu_xmm10; struct __darwin_xmm_reg __fpu_xmm11; struct __darwin_xmm_reg __fpu_xmm12; struct __darwin_xmm_reg __fpu_xmm13; struct __darwin_xmm_reg __fpu_xmm14; struct __darwin_xmm_reg __fpu_xmm15; char __fpu_rsrv4[6 * 16]; int __fpu_reserved1; } ; struct __darwin_x86_avx_state64 { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg 
__fpu_xmm7; struct __darwin_xmm_reg __fpu_xmm8; struct __darwin_xmm_reg __fpu_xmm9; struct __darwin_xmm_reg __fpu_xmm10; struct __darwin_xmm_reg __fpu_xmm11; struct __darwin_xmm_reg __fpu_xmm12; struct __darwin_xmm_reg __fpu_xmm13; struct __darwin_xmm_reg __fpu_xmm14; struct __darwin_xmm_reg __fpu_xmm15; char __fpu_rsrv4[6 * 16]; int __fpu_reserved1; char __avx_reserved1[64]; struct __darwin_xmm_reg __fpu_ymmh0; struct __darwin_xmm_reg __fpu_ymmh1; struct __darwin_xmm_reg __fpu_ymmh2; struct __darwin_xmm_reg __fpu_ymmh3; struct __darwin_xmm_reg __fpu_ymmh4; struct __darwin_xmm_reg __fpu_ymmh5; struct __darwin_xmm_reg __fpu_ymmh6; struct __darwin_xmm_reg __fpu_ymmh7; struct __darwin_xmm_reg __fpu_ymmh8; struct __darwin_xmm_reg __fpu_ymmh9; struct __darwin_xmm_reg __fpu_ymmh10; struct __darwin_xmm_reg __fpu_ymmh11; struct __darwin_xmm_reg __fpu_ymmh12; struct __darwin_xmm_reg __fpu_ymmh13; struct __darwin_xmm_reg __fpu_ymmh14; struct __darwin_xmm_reg __fpu_ymmh15; } ; struct __darwin_x86_exception_state64 { __uint16_t __trapno; __uint16_t __cpu; __uint32_t __err; __uint64_t __faultvaddr; } ; struct __darwin_x86_debug_state64 { __uint64_t __dr0; __uint64_t __dr1; __uint64_t __dr2; __uint64_t __dr3; __uint64_t __dr4; __uint64_t __dr5; __uint64_t __dr6; __uint64_t __dr7; } ; struct __darwin_mcontext32 { struct __darwin_i386_exception_state __es; struct __darwin_i386_thread_state __ss; struct __darwin_i386_float_state __fs; } ; struct __darwin_mcontext_avx32 { struct __darwin_i386_exception_state __es; struct __darwin_i386_thread_state __ss; struct __darwin_i386_avx_state __fs; } ; struct __darwin_mcontext64 { struct __darwin_x86_exception_state64 __es; struct __darwin_x86_thread_state64 __ss; struct __darwin_x86_float_state64 __fs; } ; struct __darwin_mcontext_avx64 { struct __darwin_x86_exception_state64 __es; struct __darwin_x86_thread_state64 __ss; struct __darwin_x86_avx_state64 __fs; } ; typedef struct __darwin_mcontext64 *mcontext_t; typedef __darwin_pthread_attr_t pthread_attr_t; struct __darwin_sigaltstack { void *ss_sp; __darwin_size_t ss_size; int ss_flags; } ; typedef struct __darwin_sigaltstack stack_t; struct __darwin_ucontext { int uc_onstack; __darwin_sigset_t uc_sigmask; struct __darwin_sigaltstack uc_stack; struct __darwin_ucontext *uc_link; __darwin_size_t uc_mcsize; struct __darwin_mcontext64 *uc_mcontext; } ; typedef struct __darwin_ucontext ucontext_t; typedef __darwin_sigset_t sigset_t; typedef __darwin_uid_t uid_t; union sigval { int sival_int; void *sival_ptr; } ; struct sigevent { int sigev_notify; int sigev_signo; union sigval sigev_value; void ( *sigev_notify_function )(union sigval ); pthread_attr_t *sigev_notify_attributes; } ; struct __siginfo { int si_signo; int si_errno; int si_code; pid_t si_pid; uid_t si_uid; int si_status; void *si_addr; union sigval si_value; long si_band; unsigned long __pad[7]; } ; typedef struct __siginfo siginfo_t; union __sigaction_u { void ( *__sa_handler )(int ); void ( *__sa_sigaction )(int , struct __siginfo * , void *); } ; struct __sigaction { union __sigaction_u __sigaction_u; void ( *sa_tramp )(void *, int , int , siginfo_t * , void *); sigset_t sa_mask; int sa_flags; } ; struct sigaction { union __sigaction_u __sigaction_u; sigset_t sa_mask; int sa_flags; } ; typedef void ( *sig_t )(int ); struct sigvec { void ( *sv_handler )(int ); int sv_mask; int sv_flags; } ; struct sigstack { char *ss_sp; int ss_onstack; } ; typedef signed char int8_t; typedef short int16_t; typedef int int32_t; typedef long long int64_t; typedef unsigned 
char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; typedef unsigned long long uint64_t; typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; typedef __darwin_intptr_t intptr_t; typedef unsigned long uintptr_t; typedef long int intmax_t; typedef long unsigned int uintmax_t; struct timeval { __darwin_time_t tv_sec; __darwin_suseconds_t tv_usec; } ; typedef __uint64_t rlim_t; struct rusage { struct timeval ru_utime; struct timeval ru_stime; long ru_maxrss; long ru_ixrss; long ru_idrss; long ru_isrss; long ru_minflt; long ru_majflt; long ru_nswap; long ru_inblock; long ru_oublock; long ru_msgsnd; long ru_msgrcv; long ru_nsignals; long ru_nvcsw; long ru_nivcsw; } ; typedef void *rusage_info_t; struct rusage_info_v0 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; } ; struct rusage_info_v1 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; } ; struct rusage_info_v2 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; uint64_t ri_diskio_bytesread; uint64_t ri_diskio_byteswritten; } ; struct rusage_info_v3 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; uint64_t ri_diskio_bytesread; uint64_t ri_diskio_byteswritten; uint64_t ri_cpu_time_qos_default; uint64_t ri_cpu_time_qos_maintenance; uint64_t ri_cpu_time_qos_background; uint64_t ri_cpu_time_qos_utility; uint64_t ri_cpu_time_qos_legacy; uint64_t ri_cpu_time_qos_user_initiated; uint64_t ri_cpu_time_qos_user_interactive; uint64_t ri_billed_system_time; uint64_t ri_serviced_system_time; } ; typedef struct rusage_info_v3 rusage_info_current; struct rlimit { rlim_t rlim_cur; rlim_t 
rlim_max; } ; struct proc_rlimit_control_wakeupmon { uint32_t wm_flags; int32_t wm_rate; } ; union wait { int w_status; struct stUn_imopVarPre2 { unsigned int w_Termsig: 7, w_Coredump: 1 , w_Retcode: 8 , w_Filler: 16; } w_T; struct stUn_imopVarPre3 { unsigned int w_Stopval: 8, w_Stopsig: 8 , w_Filler: 16; } w_S; } ; typedef __darwin_ct_rune_t ct_rune_t; typedef __darwin_rune_t rune_t; typedef __darwin_wchar_t wchar_t; struct stUn_imopVarPre4 { int quot; int rem; } ; typedef struct stUn_imopVarPre4 div_t; struct stUn_imopVarPre5 { long quot; long rem; } ; typedef struct stUn_imopVarPre5 ldiv_t; struct stUn_imopVarPre6 { long long quot; long long rem; } ; typedef struct stUn_imopVarPre6 lldiv_t; void *malloc(size_t __size); typedef unsigned char u_int8_t; typedef unsigned short u_int16_t; typedef unsigned int u_int32_t; typedef unsigned long long u_int64_t; typedef int64_t register_t; typedef u_int64_t user_addr_t; typedef u_int64_t user_size_t; typedef int64_t user_ssize_t; typedef int64_t user_long_t; typedef u_int64_t user_ulong_t; typedef int64_t user_time_t; typedef int64_t user_off_t; typedef u_int64_t syscall_arg_t; typedef __darwin_dev_t dev_t; typedef __darwin_mode_t mode_t; typedef float float_t; typedef double double_t; extern double fabs(double ); extern double pow(double , double ); extern double sqrt(double ); struct __float2 { float __sinval; float __cosval; } ; struct __double2 { double __sinval; double __cosval; } ; struct exception { int type; char *name; double arg1; double arg2; double retval; } ; typedef int boolean; struct stUn_imopVarPre11 { double real; double imag; } ; typedef struct stUn_imopVarPre11 dcomplex; extern double randlc(double *, double ); extern void vranlc(int , double * , double , double *); extern void timer_clear(int ); extern void timer_start(int ); extern void timer_stop(int ); extern double timer_read(int ); extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand); static int nx[11 + 1]; static int ny[11 + 1]; static int nz[11 + 1]; static char Class; static int debug_vec[8]; static int m1[11 + 1]; static int m2[11 + 1]; static int m3[11 + 1]; static int lt; static int lb; static int is1; static int is2; static int is3; static int ie1; static int ie2; static int ie3; static void setup(int *n1, int *n2 , int *n3 , int lt); static void mg3P(double ****u, double ***v , double ****r , double a[4] , double c[4] , int n1 , int n2 , int n3 , int k); static void psinv(double ***r, double ***u , int n1 , int n2 , int n3 , double c[4] , int k); static void resid(double ***u, double ***v , double ***r , int n1 , int n2 , int n3 , double a[4] , int k); static void rprj3(double ***r, int m1k , int m2k , int m3k , double ***s , int m1j , int m2j , int m3j , int k); static void interp(double ***z, int mm1 , int mm2 , int mm3 , double ***u , int n1 , int n2 , int n3 , int k); static void norm2u3(double ***r, int n1 , int n2 , int n3 , double *rnm2 , double *rnmu , int nx , int ny , int nz); static void rep_nrm(double ***u, int n1 , int n2 , int n3 , char *title , int kk); static void comm3(double ***u, int n1 , int n2 , int n3 , int kk); static void zran3(double ***z, int n1 , int n2 , int n3 , int nx , int ny , int k); static void showall(double ***z, int n1 , int n2 , int n3); static double power(double a, int 
n); static void bubble(double ten[1037][2], int j1[1037][2] , int j2[1037][2] , int j3[1037][2] , int m , int ind); static void zero3(double ***z, int n1 , int n2 , int n3); int main(int argc, char *argv[]) { int k; int it; double t; double tinit; double mflops; int nthreads = 1; double ****u; double ***v; double ****r; double a[4]; double c[4]; double rnm2; double rnmu; double epsilon = 1.0e-8; int n1; int n2; int n3; int nit; double verify_value; boolean verified; int i; int j; int l; FILE *fp; timer_clear(1); timer_clear(2); timer_start(2); printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - MG Benchmark\n\n"); fp = fopen("mg.input", "r"); if (fp != ((void *) 0)) { printf(" Reading from input file mg.input\n"); int *_imopVarPre145; _imopVarPre145 = &lt; fscanf(fp, "%d", _imopVarPre145); int _imopVarPre147; _imopVarPre147 = fgetc(fp); while (_imopVarPre147 != '\n') { ; _imopVarPre147 = fgetc(fp); } int *_imopVarPre151; int *_imopVarPre152; int *_imopVarPre153; _imopVarPre151 = &nz[lt]; _imopVarPre152 = &ny[lt]; _imopVarPre153 = &nx[lt]; fscanf(fp, "%d%d%d", _imopVarPre153, _imopVarPre152, _imopVarPre151); int _imopVarPre155; _imopVarPre155 = fgetc(fp); while (_imopVarPre155 != '\n') { ; _imopVarPre155 = fgetc(fp); } int *_imopVarPre157; _imopVarPre157 = &nit; fscanf(fp, "%d", _imopVarPre157); int _imopVarPre159; _imopVarPre159 = fgetc(fp); while (_imopVarPre159 != '\n') { ; _imopVarPre159 = fgetc(fp); } for (i = 0; i <= 7; i++) { int *_imopVarPre161; _imopVarPre161 = &debug_vec[i]; fscanf(fp, "%d", _imopVarPre161); } fclose(fp); } else { printf(" No input file. Using compiled defaults\n"); lt = 5; nit = 4; nx[lt] = 32; ny[lt] = 32; nz[lt] = 32; for (i = 0; i <= 7; i++) { debug_vec[i] = 0; } } int _imopVarPre162; _imopVarPre162 = (nx[lt] != ny[lt]); if (!_imopVarPre162) { _imopVarPre162 = (nx[lt] != nz[lt]); } if (_imopVarPre162) { Class = 'U'; } else { int _imopVarPre164; _imopVarPre164 = nx[lt] == 32; if (_imopVarPre164) { _imopVarPre164 = nit == 4; } if (_imopVarPre164) { Class = 'S'; } else { int _imopVarPre166; _imopVarPre166 = nx[lt] == 64; if (_imopVarPre166) { _imopVarPre166 = nit == 40; } if (_imopVarPre166) { Class = 'W'; } else { int _imopVarPre168; _imopVarPre168 = nx[lt] == 256; if (_imopVarPre168) { _imopVarPre168 = nit == 20; } if (_imopVarPre168) { Class = 'B'; } else { int _imopVarPre170; _imopVarPre170 = nx[lt] == 512; if (_imopVarPre170) { _imopVarPre170 = nit == 20; } if (_imopVarPre170) { Class = 'C'; } else { int _imopVarPre172; _imopVarPre172 = nx[lt] == 256; if (_imopVarPre172) { _imopVarPre172 = nit == 4; } if (_imopVarPre172) { Class = 'A'; } else { Class = 'U'; } } } } } } a[0] = -8.0 / 3.0; a[1] = 0.0; a[2] = 1.0 / 6.0; a[3] = 1.0 / 12.0; int _imopVarPre173; int _imopVarPre174; _imopVarPre173 = Class == 'A'; if (!_imopVarPre173) { _imopVarPre174 = Class == 'S'; if (!_imopVarPre174) { _imopVarPre174 = Class == 'W'; } _imopVarPre173 = _imopVarPre174; } if (_imopVarPre173) { c[0] = -3.0 / 8.0; c[1] = 1.0 / 32.0; c[2] = -1.0 / 64.0; c[3] = 0.0; } else { c[0] = -3.0 / 17.0; c[1] = 1.0 / 33.0; c[2] = -1.0 / 61.0; c[3] = 0.0; } lb = 1; int *_imopVarPre178; int *_imopVarPre179; int *_imopVarPre180; _imopVarPre178 = &n3; _imopVarPre179 = &n2; _imopVarPre180 = &n1; setup(_imopVarPre180, _imopVarPre179, _imopVarPre178, lt); unsigned long int _imopVarPre183; void *_imopVarPre184; _imopVarPre183 = (lt + 1) * sizeof(double ***); _imopVarPre184 = malloc(_imopVarPre183); u = (double ****) _imopVarPre184; for (l = lt; l >= 1; l--) { unsigned long int 
_imopVarPre187; void *_imopVarPre188; _imopVarPre187 = m3[l] * sizeof(double **); _imopVarPre188 = malloc(_imopVarPre187); u[l] = (double ***) _imopVarPre188; for (k = 0; k < m3[l]; k++) { unsigned long int _imopVarPre191; void *_imopVarPre192; _imopVarPre191 = m2[l] * sizeof(double *); _imopVarPre192 = malloc(_imopVarPre191); u[l][k] = (double **) _imopVarPre192; for (j = 0; j < m2[l]; j++) { unsigned long int _imopVarPre195; void *_imopVarPre196; _imopVarPre195 = m1[l] * sizeof(double); _imopVarPre196 = malloc(_imopVarPre195); u[l][k][j] = (double *) _imopVarPre196; } } } unsigned long int _imopVarPre199; void *_imopVarPre200; _imopVarPre199 = m3[lt] * sizeof(double **); _imopVarPre200 = malloc(_imopVarPre199); v = (double ***) _imopVarPre200; for (k = 0; k < m3[lt]; k++) { unsigned long int _imopVarPre203; void *_imopVarPre204; _imopVarPre203 = m2[lt] * sizeof(double *); _imopVarPre204 = malloc(_imopVarPre203); v[k] = (double **) _imopVarPre204; for (j = 0; j < m2[lt]; j++) { unsigned long int _imopVarPre207; void *_imopVarPre208; _imopVarPre207 = m1[lt] * sizeof(double); _imopVarPre208 = malloc(_imopVarPre207); v[k][j] = (double *) _imopVarPre208; } } unsigned long int _imopVarPre211; void *_imopVarPre212; _imopVarPre211 = (lt + 1) * sizeof(double ***); _imopVarPre212 = malloc(_imopVarPre211); r = (double ****) _imopVarPre212; for (l = lt; l >= 1; l--) { unsigned long int _imopVarPre215; void *_imopVarPre216; _imopVarPre215 = m3[l] * sizeof(double **); _imopVarPre216 = malloc(_imopVarPre215); r[l] = (double ***) _imopVarPre216; for (k = 0; k < m3[l]; k++) { unsigned long int _imopVarPre219; void *_imopVarPre220; _imopVarPre219 = m2[l] * sizeof(double *); _imopVarPre220 = malloc(_imopVarPre219); r[l][k] = (double **) _imopVarPre220; for (j = 0; j < m2[l]; j++) { unsigned long int _imopVarPre223; void *_imopVarPre224; _imopVarPre223 = m1[l] * sizeof(double); _imopVarPre224 = malloc(_imopVarPre223); r[l][k][j] = (double *) _imopVarPre224; } } } double ***_imopVarPre226; _imopVarPre226 = u[lt]; zero3(_imopVarPre226, n1, n2, n3); int _imopVarPre229; int _imopVarPre230; _imopVarPre229 = ny[lt]; _imopVarPre230 = nx[lt]; zran3(v, n1, n2, n3, _imopVarPre230, _imopVarPre229, lt); int _imopVarPre236; int _imopVarPre237; int _imopVarPre238; double *_imopVarPre239; double *_imopVarPre240; _imopVarPre236 = nz[lt]; _imopVarPre237 = ny[lt]; _imopVarPre238 = nx[lt]; _imopVarPre239 = &rnmu; _imopVarPre240 = &rnm2; norm2u3(v, n1, n2, n3, _imopVarPre240, _imopVarPre239, _imopVarPre238, _imopVarPre237, _imopVarPre236); int _imopVarPre244; int _imopVarPre245; int _imopVarPre246; _imopVarPre244 = nz[lt]; _imopVarPre245 = ny[lt]; _imopVarPre246 = nx[lt]; printf(" Size: %3dx%3dx%3d (class %1c)\n", _imopVarPre246, _imopVarPre245, _imopVarPre244, Class); printf(" Iterations: %3d\n", nit); double ***_imopVarPre249; double ***_imopVarPre250; _imopVarPre249 = r[lt]; _imopVarPre250 = u[lt]; resid(_imopVarPre250, v, _imopVarPre249, n1, n2, n3, a, lt); int _imopVarPre257; int _imopVarPre258; int _imopVarPre259; double *_imopVarPre260; double *_imopVarPre261; double ***_imopVarPre262; _imopVarPre257 = nz[lt]; _imopVarPre258 = ny[lt]; _imopVarPre259 = nx[lt]; _imopVarPre260 = &rnmu; _imopVarPre261 = &rnm2; _imopVarPre262 = r[lt]; norm2u3(_imopVarPre262, n1, n2, n3, _imopVarPre261, _imopVarPre260, _imopVarPre259, _imopVarPre258, _imopVarPre257); mg3P(u, v, r, a, c, n1, n2, n3, lt); double ***_imopVarPre265; double ***_imopVarPre266; _imopVarPre265 = r[lt]; _imopVarPre266 = u[lt]; resid(_imopVarPre266, v, _imopVarPre265, 
n1, n2, n3, a, lt); int *_imopVarPre270; int *_imopVarPre271; int *_imopVarPre272; _imopVarPre270 = &n3; _imopVarPre271 = &n2; _imopVarPre272 = &n1; setup(_imopVarPre272, _imopVarPre271, _imopVarPre270, lt); double ***_imopVarPre274; _imopVarPre274 = u[lt]; zero3(_imopVarPre274, n1, n2, n3); int _imopVarPre277; int _imopVarPre278; _imopVarPre277 = ny[lt]; _imopVarPre278 = nx[lt]; zran3(v, n1, n2, n3, _imopVarPre278, _imopVarPre277, lt); timer_stop(2); timer_start(1); double ***_imopVarPre281; double ***_imopVarPre282; _imopVarPre281 = r[lt]; _imopVarPre282 = u[lt]; resid(_imopVarPre282, v, _imopVarPre281, n1, n2, n3, a, lt); int _imopVarPre289; int _imopVarPre290; int _imopVarPre291; double *_imopVarPre292; double *_imopVarPre293; double ***_imopVarPre294; _imopVarPre289 = nz[lt]; _imopVarPre290 = ny[lt]; _imopVarPre291 = nx[lt]; _imopVarPre292 = &rnmu; _imopVarPre293 = &rnm2; _imopVarPre294 = r[lt]; norm2u3(_imopVarPre294, n1, n2, n3, _imopVarPre293, _imopVarPre292, _imopVarPre291, _imopVarPre290, _imopVarPre289); for (it = 1; it <= nit; it++) { mg3P(u, v, r, a, c, n1, n2, n3, lt); double ***_imopVarPre297; double ***_imopVarPre298; _imopVarPre297 = r[lt]; _imopVarPre298 = u[lt]; resid(_imopVarPre298, v, _imopVarPre297, n1, n2, n3, a, lt); } int _imopVarPre305; int _imopVarPre306; int _imopVarPre307; double *_imopVarPre308; double *_imopVarPre309; double ***_imopVarPre310; _imopVarPre305 = nz[lt]; _imopVarPre306 = ny[lt]; _imopVarPre307 = nx[lt]; _imopVarPre308 = &rnmu; _imopVarPre309 = &rnm2; _imopVarPre310 = r[lt]; norm2u3(_imopVarPre310, n1, n2, n3, _imopVarPre309, _imopVarPre308, _imopVarPre307, _imopVarPre306, _imopVarPre305); #pragma omp parallel { } timer_stop(1); t = timer_read(1); tinit = timer_read(2); verified = 0; verify_value = 0.0; printf(" Initialization time: %15.3f seconds\n", tinit); printf(" Benchmark completed\n"); if (Class != 'U') { if (Class == 'S') { verify_value = 0.530770700573e-04; } else { if (Class == 'W') { verify_value = 0.250391406439e-17; } else { if (Class == 'A') { verify_value = 0.2433365309e-5; } else { if (Class == 'B') { verify_value = 0.180056440132e-5; } else { if (Class == 'C') { verify_value = 0.570674826298e-06; } } } } } double _imopVarPre313; double _imopVarPre314; _imopVarPre313 = rnm2 - verify_value; _imopVarPre314 = fabs(_imopVarPre313); if (_imopVarPre314 <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" L2 Norm is %20.12e\n", rnm2); double _imopVarPre316; _imopVarPre316 = rnm2 - verify_value; printf(" Error is %20.12e\n", _imopVarPre316); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" L2 Norm is %20.12e\n", rnm2); printf(" The correct L2 Norm is %20.12e\n", verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if (t != 0.0) { int nn = nx[lt] * ny[lt] * nz[lt]; mflops = 58. 
* nit * nn * 1.0e-6 / t; } else { mflops = 0.0; } int _imopVarPre320; int _imopVarPre321; int _imopVarPre322; _imopVarPre320 = nz[lt]; _imopVarPre321 = ny[lt]; _imopVarPre322 = nx[lt]; c_print_results("MG", Class, _imopVarPre322, _imopVarPre321, _imopVarPre320, nit, nthreads, t, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randdp"); } static void setup(int *n1, int *n2 , int *n3 , int lt) { int k; for (k = lt - 1; k >= 1; k--) { nx[k] = nx[k + 1] / 2; ny[k] = ny[k + 1] / 2; nz[k] = nz[k + 1] / 2; } for (k = 1; k <= lt; k++) { m1[k] = nx[k] + 2; m2[k] = nz[k] + 2; m3[k] = ny[k] + 2; } is1 = 1; ie1 = nx[lt]; *n1 = nx[lt] + 2; is2 = 1; ie2 = ny[lt]; *n2 = ny[lt] + 2; is3 = 1; ie3 = nz[lt]; *n3 = nz[lt] + 2; if (debug_vec[1] >= 1) { printf(" in setup, \n"); printf(" lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n"); int _imopVarPre329; int _imopVarPre330; int _imopVarPre331; int _imopVarPre332; int _imopVarPre333; int _imopVarPre334; _imopVarPre329 = *n3; _imopVarPre330 = *n2; _imopVarPre331 = *n1; _imopVarPre332 = nz[lt]; _imopVarPre333 = ny[lt]; _imopVarPre334 = nx[lt]; printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n", lt, _imopVarPre334, _imopVarPre333, _imopVarPre332, _imopVarPre331, _imopVarPre330, _imopVarPre329, is1, is2, is3, ie1, ie2, ie3); } } static void mg3P(double ****u, double ***v , double ****r , double a[4] , double c[4] , int n1 , int n2 , int n3 , int k) { int j; for (k = lt; k >= lb + 1; k--) { j = k - 1; int _imopVarPre343; int _imopVarPre344; int _imopVarPre345; double ***_imopVarPre346; int _imopVarPre347; int _imopVarPre348; int _imopVarPre349; double ***_imopVarPre350; _imopVarPre343 = m3[j]; _imopVarPre344 = m2[j]; _imopVarPre345 = m1[j]; _imopVarPre346 = r[j]; _imopVarPre347 = m3[k]; _imopVarPre348 = m2[k]; _imopVarPre349 = m1[k]; _imopVarPre350 = r[k]; rprj3(_imopVarPre350, _imopVarPre349, _imopVarPre348, _imopVarPre347, _imopVarPre346, _imopVarPre345, _imopVarPre344, _imopVarPre343, k); } k = lb; int _imopVarPre355; int _imopVarPre356; int _imopVarPre357; double ***_imopVarPre358; _imopVarPre355 = m3[lb]; _imopVarPre356 = m2[lb]; _imopVarPre357 = m1[lb]; _imopVarPre358 = u[lb]; zero3(_imopVarPre358, _imopVarPre357, _imopVarPre356, _imopVarPre355); int _imopVarPre364; int _imopVarPre365; int _imopVarPre366; double ***_imopVarPre367; double ***_imopVarPre368; _imopVarPre364 = m3[k]; _imopVarPre365 = m2[k]; _imopVarPre366 = m1[k]; _imopVarPre367 = u[k]; _imopVarPre368 = r[k]; psinv(_imopVarPre368, _imopVarPre367, _imopVarPre366, _imopVarPre365, _imopVarPre364, c, k); for (k = lb + 1; k <= lt - 1; k++) { j = k - 1; int _imopVarPre373; int _imopVarPre374; int _imopVarPre375; double ***_imopVarPre376; _imopVarPre373 = m3[k]; _imopVarPre374 = m2[k]; _imopVarPre375 = m1[k]; _imopVarPre376 = u[k]; zero3(_imopVarPre376, _imopVarPre375, _imopVarPre374, _imopVarPre373); int _imopVarPre385; int _imopVarPre386; int _imopVarPre387; double ***_imopVarPre388; int _imopVarPre389; int _imopVarPre390; int _imopVarPre391; double ***_imopVarPre392; _imopVarPre385 = m3[k]; _imopVarPre386 = m2[k]; _imopVarPre387 = m1[k]; _imopVarPre388 = u[k]; _imopVarPre389 = m3[j]; _imopVarPre390 = m2[j]; _imopVarPre391 = m1[j]; _imopVarPre392 = u[j]; interp(_imopVarPre392, _imopVarPre391, _imopVarPre390, _imopVarPre389, _imopVarPre388, _imopVarPre387, _imopVarPre386, _imopVarPre385, k); int _imopVarPre399; int _imopVarPre400; int _imopVarPre401; double ***_imopVarPre402; double ***_imopVarPre403; double 
***_imopVarPre404; _imopVarPre399 = m3[k]; _imopVarPre400 = m2[k]; _imopVarPre401 = m1[k]; _imopVarPre402 = r[k]; _imopVarPre403 = r[k]; _imopVarPre404 = u[k]; resid(_imopVarPre404, _imopVarPre403, _imopVarPre402, _imopVarPre401, _imopVarPre400, _imopVarPre399, a, k); int _imopVarPre410; int _imopVarPre411; int _imopVarPre412; double ***_imopVarPre413; double ***_imopVarPre414; _imopVarPre410 = m3[k]; _imopVarPre411 = m2[k]; _imopVarPre412 = m1[k]; _imopVarPre413 = u[k]; _imopVarPre414 = r[k]; psinv(_imopVarPre414, _imopVarPre413, _imopVarPre412, _imopVarPre411, _imopVarPre410, c, k); } j = lt - 1; double ***_imopVarPre420; int _imopVarPre421; int _imopVarPre422; int _imopVarPre423; double ***_imopVarPre424; _imopVarPre420 = u[lt]; _imopVarPre421 = m3[j]; _imopVarPre422 = m2[j]; _imopVarPre423 = m1[j]; _imopVarPre424 = u[j]; interp(_imopVarPre424, _imopVarPre423, _imopVarPre422, _imopVarPre421, _imopVarPre420, n1, n2, n3, lt); double ***_imopVarPre427; double ***_imopVarPre428; _imopVarPre427 = r[lt]; _imopVarPre428 = u[lt]; resid(_imopVarPre428, v, _imopVarPre427, n1, n2, n3, a, lt); double ***_imopVarPre431; double ***_imopVarPre432; _imopVarPre431 = u[lt]; _imopVarPre432 = r[lt]; psinv(_imopVarPre432, _imopVarPre431, n1, n2, n3, c, k); } static void psinv(double ***r, double ***u , int n1 , int n2 , int n3 , double c[4] , int k) { int i3; int i2; int i1; double r1[1037]; double r2[1037]; #pragma omp parallel default(shared) private(i1, i2, i3, r1, r2) { #pragma omp for nowait for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { for (i1 = 0; i1 < n1; i1++) { r1[i1] = r[i3][i2 - 1][i1] + r[i3][i2 + 1][i1] + r[i3 - 1][i2][i1] + r[i3 + 1][i2][i1]; r2[i1] = r[i3 - 1][i2 - 1][i1] + r[i3 - 1][i2 + 1][i1] + r[i3 + 1][i2 - 1][i1] + r[i3 + 1][i2 + 1][i1]; } for (i1 = 1; i1 < n1 - 1; i1++) { u[i3][i2][i1] = u[i3][i2][i1] + c[0] * r[i3][i2][i1] + c[1] * (r[i3][i2][i1 - 1] + r[i3][i2][i1 + 1] + r1[i1]) + c[2] * (r2[i1] + r1[i1 - 1] + r1[i1 + 1]); } } } // #pragma omp dummyFlush BARRIER_START written() read() #pragma omp barrier } comm3(u, n1, n2, n3, k); if (debug_vec[0] >= 1) { rep_nrm(u, n1, n2, n3, " psinv", k); } if (debug_vec[3] >= k) { showall(u, n1, n2, n3); } } static void resid(double ***u, double ***v , double ***r , int n1 , int n2 , int n3 , double a[4] , int k) { int i3; int i2; int i1; double u1[1037]; double u2[1037]; #pragma omp parallel default(shared) private(i1, i2, i3, u1, u2) { #pragma omp for nowait for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { for (i1 = 0; i1 < n1; i1++) { u1[i1] = u[i3][i2 - 1][i1] + u[i3][i2 + 1][i1] + u[i3 - 1][i2][i1] + u[i3 + 1][i2][i1]; u2[i1] = u[i3 - 1][i2 - 1][i1] + u[i3 - 1][i2 + 1][i1] + u[i3 + 1][i2 - 1][i1] + u[i3 + 1][i2 + 1][i1]; } for (i1 = 1; i1 < n1 - 1; i1++) { r[i3][i2][i1] = v[i3][i2][i1] - a[0] * u[i3][i2][i1] - a[2] * (u2[i1] + u1[i1 - 1] + u1[i1 + 1]) - a[3] * (u2[i1 - 1] + u2[i1 + 1]); } } } // #pragma omp dummyFlush BARRIER_START written() read() #pragma omp barrier } comm3(r, n1, n2, n3, k); if (debug_vec[0] >= 1) { rep_nrm(r, n1, n2, n3, " resid", k); } if (debug_vec[2] >= k) { showall(r, n1, n2, n3); } } static void rprj3(double ***r, int m1k , int m2k , int m3k , double ***s , int m1j , int m2j , int m3j , int k) { int j3; int j2; int j1; int i3; int i2; int i1; int d1; int d2; int d3; double x1[1037]; double y1[1037]; double x2; double y2; if (m1k == 3) { d1 = 2; } else { d1 = 1; } if (m2k == 3) { d2 = 2; } else { d2 = 1; } if (m3k == 3) { d3 = 2; } else { d3 = 1; } #pragma omp parallel 
default(shared) private(j1, j2, j3, i1, i2, i3, x1, y1, x2, y2) { #pragma omp for nowait for (j3 = 1; j3 < m3j - 1; j3++) { i3 = 2 * j3 - d3; for (j2 = 1; j2 < m2j - 1; j2++) { i2 = 2 * j2 - d2; for (j1 = 1; j1 < m1j; j1++) { i1 = 2 * j1 - d1; x1[i1] = r[i3 + 1][i2][i1] + r[i3 + 1][i2 + 2][i1] + r[i3][i2 + 1][i1] + r[i3 + 2][i2 + 1][i1]; y1[i1] = r[i3][i2][i1] + r[i3 + 2][i2][i1] + r[i3][i2 + 2][i1] + r[i3 + 2][i2 + 2][i1]; } for (j1 = 1; j1 < m1j - 1; j1++) { i1 = 2 * j1 - d1; y2 = r[i3][i2][i1 + 1] + r[i3 + 2][i2][i1 + 1] + r[i3][i2 + 2][i1 + 1] + r[i3 + 2][i2 + 2][i1 + 1]; x2 = r[i3 + 1][i2][i1 + 1] + r[i3 + 1][i2 + 2][i1 + 1] + r[i3][i2 + 1][i1 + 1] + r[i3 + 2][i2 + 1][i1 + 1]; s[j3][j2][j1] = 0.5 * r[i3 + 1][i2 + 1][i1 + 1] + 0.25 * (r[i3 + 1][i2 + 1][i1] + r[i3 + 1][i2 + 1][i1 + 2] + x2) + 0.125 * (x1[i1] + x1[i1 + 2] + y2) + 0.0625 * (y1[i1] + y1[i1 + 2]); } } } // #pragma omp dummyFlush BARRIER_START written() read() #pragma omp barrier } int _imopVarPre434; _imopVarPre434 = k - 1; comm3(s, m1j, m2j, m3j, _imopVarPre434); if (debug_vec[0] >= 1) { int _imopVarPre436; _imopVarPre436 = k - 1; rep_nrm(s, m1j, m2j, m3j, " rprj3", _imopVarPre436); } if (debug_vec[4] >= k) { showall(s, m1j, m2j, m3j); } } static void interp(double ***z, int mm1 , int mm2 , int mm3 , double ***u , int n1 , int n2 , int n3 , int k) { int i3; int i2; int i1; int d1; int d2; int d3; int t1; int t2; int t3; double z1[1037]; double z2[1037]; double z3[1037]; int _imopVarPre439; int _imopVarPre440; _imopVarPre439 = n1 != 3; if (_imopVarPre439) { _imopVarPre440 = n2 != 3; if (_imopVarPre440) { _imopVarPre440 = n3 != 3; } _imopVarPre439 = _imopVarPre440; } if (_imopVarPre439) { #pragma omp parallel default(shared) private(i1, i2, i3, z1, z2, z3) { #pragma omp for nowait for (i3 = 0; i3 < mm3 - 1; i3++) { for (i2 = 0; i2 < mm2 - 1; i2++) { for (i1 = 0; i1 < mm1; i1++) { z1[i1] = z[i3][i2 + 1][i1] + z[i3][i2][i1]; z2[i1] = z[i3 + 1][i2][i1] + z[i3][i2][i1]; z3[i1] = z[i3 + 1][i2 + 1][i1] + z[i3 + 1][i2][i1] + z1[i1]; } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3][2 * i2][2 * i1] = u[2 * i3][2 * i2][2 * i1] + z[i3][i2][i1]; u[2 * i3][2 * i2][2 * i1 + 1] = u[2 * i3][2 * i2][2 * i1 + 1] + 0.5 * (z[i3][i2][i1 + 1] + z[i3][i2][i1]); } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3][2 * i2 + 1][2 * i1] = u[2 * i3][2 * i2 + 1][2 * i1] + 0.5 * z1[i1]; u[2 * i3][2 * i2 + 1][2 * i1 + 1] = u[2 * i3][2 * i2 + 1][2 * i1 + 1] + 0.25 * (z1[i1] + z1[i1 + 1]); } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3 + 1][2 * i2][2 * i1] = u[2 * i3 + 1][2 * i2][2 * i1] + 0.5 * z2[i1]; u[2 * i3 + 1][2 * i2][2 * i1 + 1] = u[2 * i3 + 1][2 * i2][2 * i1 + 1] + 0.25 * (z2[i1] + z2[i1 + 1]); } for (i1 = 0; i1 < mm1 - 1; i1++) { u[2 * i3 + 1][2 * i2 + 1][2 * i1] = u[2 * i3 + 1][2 * i2 + 1][2 * i1] + 0.25 * z3[i1]; u[2 * i3 + 1][2 * i2 + 1][2 * i1 + 1] = u[2 * i3 + 1][2 * i2 + 1][2 * i1 + 1] + 0.125 * (z3[i1] + z3[i1 + 1]); } } } // #pragma omp dummyFlush BARRIER_START written() read([i3, m2.f, u, m3, heapCell#5, nz.f, _imopVarPre491, n1, resid, tmp, n3, norm2u3, r, heapCell#3, nullCell, m2, m1.f, nz, ny.f, n2, n2, heapCell#9, _imopVarPre495, showall, i3, debug_vec.f, _imopVarPre487, lb, heapCell#1, heapCell#0, m1, s, n3, n1, nx.f, ny, debug_vec, heapCell#10, r, a.f, heapCell#7, v, m3.f, nx, a, heapCell#8, heapCell#2, printf, fabs, heapCell#4, rep_nrm, heapCell#6]) #pragma omp barrier } } else { if (n1 == 3) { d1 = 2; t1 = 1; } else { d1 = 1; t1 = 0; } if (n2 == 3) { d2 = 2; t2 = 1; } else { d2 = 1; t2 = 0; } if (n3 == 3) { d3 = 2; t3 = 1; } else { d3 = 1; 
t3 = 0; } #pragma omp parallel default(shared) private(i1, i2, i3) { #pragma omp for nowait for (i3 = d3; i3 <= mm3 - 1; i3++) { for (i2 = d2; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]; } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] + 0.5 * (z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2 - 1][i1 - 1]); } } for (i2 = 1; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] + 0.5 * (z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - d3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] + 0.25 * (z[i3 - 1][i2][i1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } } } // #pragma omp dummyFlush BARRIER_START written() read([i3, u, m2.f, m3, heapCell#5, nz.f, u, _imopVarPre491, n1, resid, t3, tmp, n3, r, norm2u3, heapCell#3, nullCell, m2, m1.f, mm1, nz, ny.f, n2, n2, heapCell#9, z, t2, _imopVarPre495, showall, i3, debug_vec.f, _imopVarPre487, lb, heapCell#1, heapCell#0, m1, d2, mm2, s, n3, n1, nx.f, ny, t1, debug_vec, heapCell#10, r, a.f, d1, heapCell#7, i3, v, mm3, m3.f, a, nx, heapCell#8, heapCell#2, printf, fabs, heapCell#4, rep_nrm, heapCell#6]) #pragma omp barrier #pragma omp for nowait for (i3 = 1; i3 <= mm3 - 1; i3++) { for (i2 = d2; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - d1 - 1] + 0.5 * (z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - d2 - 1][2 * i1 - t1 - 1] + 0.25 * (z[i3][i2 - 1][i1] + z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2 - 1][i1 - 1]); } } for (i2 = 1; i2 <= mm2 - 1; i2++) { for (i1 = d1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - d1 - 1] + 0.25 * (z[i3][i2][i1 - 1] + z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } for (i1 = 1; i1 <= mm1 - 1; i1++) { u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] = u[2 * i3 - t3 - 1][2 * i2 - t2 - 1][2 * i1 - t1 - 1] + 0.125 * (z[i3][i2][i1] + z[i3][i2 - 1][i1] + z[i3][i2][i1 - 1] + z[i3][i2 - 1][i1 - 1] + z[i3 - 1][i2][i1] + z[i3 - 1][i2 - 1][i1] + z[i3 - 1][i2][i1 - 1] + z[i3 - 1][i2 - 1][i1 - 1]); } } } } } if (debug_vec[0] >= 1) { int _imopVarPre442; _imopVarPre442 = k - 1; rep_nrm(z, mm1, mm2, mm3, "z: inter", _imopVarPre442); rep_nrm(u, n1, n2, n3, "u: inter", k); } if (debug_vec[5] >= k) { showall(z, mm1, mm2, mm3); showall(u, n1, n2, n3); } } static void norm2u3(double ***r, int n1 , int n2 , int n3 , double *rnm2 , double *rnmu , int nx , int ny , int nz) { double s = 0.0; int i3; int i2; int i1; int n; double a = 0.0; double tmp = 0.0; n = nx * ny * nz; #pragma omp parallel default(shared) private(i1, i2, i3, a) reduction(+:s) reduction(max:tmp) { #pragma omp for nowait for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { for (i1 = 1; i1 < n1 - 1; i1++) { s = s + r[i3][i2][i1] * r[i3][i2][i1]; double _imopVarPre444; double _imopVarPre445; _imopVarPre444 = r[i3][i2][i1]; 
_imopVarPre445 = fabs(_imopVarPre444); if (_imopVarPre445 > tmp) { tmp = _imopVarPre445; } } } } // #pragma omp dummyFlush BARRIER_START written([tmp, heapCell#7, s, heapCell#10, heapCell#6, nullCell]) read([ie1, i3, r, sqrt, m2.f, u, m3, timer_stop, heapCell#5, nz.f, m3j, _imopVarPre491, n2, n1, resid, d2, d1, n1, tmp, n3, c.f, norm2u3, r, heapCell#3, nullCell, m2, u, m1.f, ie2, nz, ny.f, n2, n2, heapCell#9, n3, _imopVarPre495, showall, i3, malloc, debug_vec.f, lb, is1, _imopVarPre487, heapCell#1, zero3, heapCell#0, i3, m1, mg3P, z, ie3, s, n3, m2j, n1, ny, r, nx.f, n3, psinv, j3, debug_vec, is2, heapCell#10, setup, r, i3, a.f, heapCell#7, lt, c_print_results, Class, v, m3.f, rprj3, a, nx, s, n1, heapCell#8, heapCell#2, d3, printf, c, n2, fabs, is3, m1j, heapCell#4, rep_nrm, heapCell#6, timer_read]) #pragma omp barrier } *rnmu = tmp; double _imopVarPre447; double _imopVarPre448; _imopVarPre447 = s / (double) n; _imopVarPre448 = sqrt(_imopVarPre447); *rnm2 = _imopVarPre448; } static void rep_nrm(double ***u, int n1 , int n2 , int n3 , char *title , int kk) { double rnm2; double rnmu; int _imopVarPre454; int _imopVarPre455; int _imopVarPre456; double *_imopVarPre457; double *_imopVarPre458; _imopVarPre454 = nz[kk]; _imopVarPre455 = ny[kk]; _imopVarPre456 = nx[kk]; _imopVarPre457 = &rnmu; _imopVarPre458 = &rnm2; norm2u3(u, n1, n2, n3, _imopVarPre458, _imopVarPre457, _imopVarPre456, _imopVarPre455, _imopVarPre454); printf(" Level%2d in %8s: norms =%21.14e%21.14e\n", kk, title, rnm2, rnmu); } static void comm3(double ***u, int n1 , int n2 , int n3 , int kk) { int i1; int i2; int i3; #pragma omp parallel default(shared) private(i1, i2, i3) { #pragma omp for nowait for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { u[i3][i2][n1 - 1] = u[i3][i2][1]; u[i3][i2][0] = u[i3][i2][n1 - 2]; } for (i1 = 0; i1 < n1; i1++) { u[i3][n2 - 1][i1] = u[i3][1][i1]; u[i3][0][i1] = u[i3][n2 - 2][i1]; } } // #pragma omp dummyFlush BARRIER_START written() read([ie1, i3, r, n2, m2.f, u, m3, heapCell#5, nz.f, m3j, _imopVarPre491, n2, n1, resid, d2, d1, n1, tmp, n3, c.f, norm2u3, r, heapCell#3, nullCell, m2, u, m1.f, n3, ie2, nz, ny.f, n2, n2, heapCell#9, n3, _imopVarPre495, showall, i3, malloc, debug_vec.f, lb, is1, _imopVarPre487, heapCell#1, zero3, heapCell#0, i3, m1, mg3P, z, ie3, s, n3, m2j, n1, ny, r, nx.f, u, n3, psinv, j3, debug_vec, is2, i2, heapCell#10, setup, r, i3, a.f, heapCell#7, lt, v, m3.f, rprj3, n1, a, nx, s, n1, heapCell#8, heapCell#2, d3, printf, c, n2, fabs, is3, m1j, heapCell#4, rep_nrm, heapCell#6]) #pragma omp barrier #pragma omp for nowait for (i2 = 0; i2 < n2; i2++) { for (i1 = 0; i1 < n1; i1++) { u[n3 - 1][i2][i1] = u[1][i2][i1]; u[0][i2][i1] = u[n3 - 2][i2][i1]; } } } } static void zran3(double ***z, int n1 , int n2 , int n3 , int nx , int ny , int k) { int i0; int m0; int m1; int i1; int i2; int i3; int d1; int e1; int e2; int e3; double xx; double x0; double x1; double a1; double a2; double ai; double ten[10][2]; double best; int i; int j1[10][2]; int j2[10][2]; int j3[10][2]; int jg[4][10][2]; double rdummy; double _imopVarPre460; double _imopVarPre461; _imopVarPre460 = pow(5.0, 13); _imopVarPre461 = power(_imopVarPre460, nx); a1 = _imopVarPre461; int _imopVarPre464; double _imopVarPre465; double _imopVarPre466; _imopVarPre464 = nx * ny; _imopVarPre465 = pow(5.0, 13); _imopVarPre466 = power(_imopVarPre465, _imopVarPre464); a2 = _imopVarPre466; zero3(z, n1, n2, n3); i = is1 - 1 + nx * (is2 - 1 + ny * (is3 - 1)); double _imopVarPre468; double _imopVarPre469; _imopVarPre468 = 
pow(5.0, 13); _imopVarPre469 = power(_imopVarPre468, i); d1 = ie1 - is1 + 1; e1 = ie1 - is1 + 2; e2 = ie2 - is2 + 2; e3 = ie3 - is3 + 2; x0 = 314159265.e0; double *_imopVarPre471; double _imopVarPre472; _imopVarPre471 = &x0; _imopVarPre472 = randlc(_imopVarPre471, _imopVarPre469); rdummy = _imopVarPre472; for (i3 = 1; i3 < e3; i3++) { x1 = x0; for (i2 = 1; i2 < e2; i2++) { xx = x0; double *_imopVarPre476; double _imopVarPre477; double *_imopVarPre478; _imopVarPre476 = &(z[i3][i2][0]); _imopVarPre477 = pow(5.0, 13); _imopVarPre478 = &xx; vranlc(d1, _imopVarPre478, _imopVarPre477, _imopVarPre476); double *_imopVarPre480; double _imopVarPre481; _imopVarPre480 = &x1; _imopVarPre481 = randlc(_imopVarPre480, a1); rdummy = _imopVarPre481; } double *_imopVarPre483; double _imopVarPre484; _imopVarPre483 = &x0; _imopVarPre484 = randlc(_imopVarPre483, a2); rdummy = _imopVarPre484; } for (i = 0; i < 10; i++) { ten[i][1] = 0.0; j1[i][1] = 0; j2[i][1] = 0; j3[i][1] = 0; ten[i][0] = 1.0; j1[i][0] = 0; j2[i][0] = 0; j3[i][0] = 0; } for (i3 = 1; i3 < n3 - 1; i3++) { for (i2 = 1; i2 < n2 - 1; i2++) { for (i1 = 1; i1 < n1 - 1; i1++) { if (z[i3][i2][i1] > ten[0][1]) { ten[0][1] = z[i3][i2][i1]; j1[0][1] = i1; j2[0][1] = i2; j3[0][1] = i3; bubble(ten, j1, j2, j3, 10, 1); } if (z[i3][i2][i1] < ten[0][0]) { ten[0][0] = z[i3][i2][i1]; j1[0][0] = i1; j2[0][0] = i2; j3[0][0] = i3; bubble(ten, j1, j2, j3, 10, 0); } } } } i1 = 10 - 1; i0 = 10 - 1; for (i = 10 - 1; i >= 0; i--) { best = z[j3[i1][1]][j2[i1][1]][j1[i1][1]]; if (best == z[j3[i1][1]][j2[i1][1]][j1[i1][1]]) { jg[0][i][1] = 0; jg[1][i][1] = is1 - 1 + j1[i1][1]; jg[2][i][1] = is2 - 1 + j2[i1][1]; jg[3][i][1] = is3 - 1 + j3[i1][1]; i1 = i1 - 1; } else { jg[0][i][1] = 0; jg[1][i][1] = 0; jg[2][i][1] = 0; jg[3][i][1] = 0; } ten[i][1] = best; best = z[j3[i0][0]][j2[i0][0]][j1[i0][0]]; if (best == z[j3[i0][0]][j2[i0][0]][j1[i0][0]]) { jg[0][i][0] = 0; jg[1][i][0] = is1 - 1 + j1[i0][0]; jg[2][i][0] = is2 - 1 + j2[i0][0]; jg[3][i][0] = is3 - 1 + j3[i0][0]; i0 = i0 - 1; } else { jg[0][i][0] = 0; jg[1][i][0] = 0; jg[2][i][0] = 0; jg[3][i][0] = 0; } ten[i][0] = best; } m1 = i1 + 1; m0 = i0 + 1; #pragma omp parallel private(i2, i1) { #pragma omp for nowait for (i3 = 0; i3 < n3; i3++) { for (i2 = 0; i2 < n2; i2++) { for (i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } // #pragma omp dummyFlush BARRIER_START written() read() #pragma omp barrier } for (i = 10 - 1; i >= m0; i--) { z[j3[i][0]][j2[i][0]][j1[i][0]] = -1.0; } for (i = 10 - 1; i >= m1; i--) { z[j3[i][1]][j2[i][1]][j1[i][1]] = 1.0; } comm3(z, n1, n2, n3, lt); } static void showall(double ***z, int n1 , int n2 , int n3) { int i1; int i2; int i3; int m1; int m2; int m3; int _imopVarPre487; int _imopVarPre488; _imopVarPre487 = (n1 < 18); if (_imopVarPre487) { _imopVarPre488 = n1; } else { _imopVarPre488 = 18; } int _imopVarPre491; int _imopVarPre492; _imopVarPre491 = (n2 < 14); if (_imopVarPre491) { _imopVarPre492 = n2; } else { _imopVarPre492 = 14; } int _imopVarPre495; int _imopVarPre496; _imopVarPre495 = (n3 < 18); if (_imopVarPre495) { _imopVarPre496 = n3; } else { _imopVarPre496 = 18; } printf("\n"); for (i3 = 0; i3 < _imopVarPre496; i3++) { for (i1 = 0; i1 < _imopVarPre488; i1++) { for (i2 = 0; i2 < _imopVarPre492; i2++) { double _imopVarPre498; _imopVarPre498 = z[i3][i2][i1]; printf("%6.3f", _imopVarPre498); } printf("\n"); } printf(" - - - - - - - \n"); } printf("\n"); } static double power(double a, int n) { double aj; int nj; double rdummy; double power; power = 1.0; nj = n; while (nj != 0) { if ((nj 
% 2) == 1) { double *_imopVarPre500; double _imopVarPre501; _imopVarPre500 = &power; _imopVarPre501 = randlc(_imopVarPre500, a); rdummy = _imopVarPre501; } double *_imopVarPre503; double _imopVarPre504; _imopVarPre503 = &aj; _imopVarPre504 = randlc(_imopVarPre503, a); rdummy = _imopVarPre504; nj = nj / 2; } return power; } static void bubble(double ten[1037][2], int j1[1037][2] , int j2[1037][2] , int j3[1037][2] , int m , int ind) { double temp; int i; int j_temp; if (ind == 1) { for (i = 0; i < m - 1; i++) { if (ten[i][ind] > ten[i + 1][ind]) { temp = ten[i + 1][ind]; ten[i + 1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i + 1][ind]; j1[i + 1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i + 1][ind]; j2[i + 1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i + 1][ind]; j3[i + 1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } else { for (i = 0; i < m - 1; i++) { if (ten[i][ind] < ten[i + 1][ind]) { temp = ten[i + 1][ind]; ten[i + 1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i + 1][ind]; j1[i + 1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i + 1][ind]; j2[i + 1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i + 1][ind]; j3[i + 1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } } static void zero3(double ***z, int n1 , int n2 , int n3) { int i1; int i2; int i3; #pragma omp parallel private(i1, i2, i3) { #pragma omp for nowait for (i3 = 0; i3 < n3; i3++) { for (i2 = 0; i2 < n2; i2++) { for (i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } // #pragma omp dummyFlush BARRIER_START written() read() #pragma omp barrier } }
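/* A standalone restatement of the MFLOP/s rating computed above: MG is
 * counted as 58 floating-point operations per fine-grid point per iteration,
 * so the rate is 58 * nit * nx*ny*nz * 1e-6 / t. Sketch for clarity only;
 * mg_mflops is not part of the benchmark source. */
static double mg_mflops(int nit, int nx, int ny, int nz, double t) {
    if (t == 0.0) {
        return 0.0;                         /* same guard the benchmark applies */
    }
    double nn = (double) nx * ny * nz;      /* fine-grid points */
    return 58.0 * nit * nn * 1.0e-6 / t;
}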
SokobanMP_Digito.c
#include "../common/sort.h" #include "../common/structures.h" #include "../common/common.h" #include "../common/util.h" #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define SIZE_THREAD_LIST 2000 #define NUM_MAIN_STATES 1000 typedef struct idTrie { struct idTrie *idLeafs[10]; } idTrie; // Array para o espalhamento dos IDs. idTrie *mainId; State **lastMainState; idTrie *new_trie() { idTrie *returnTrie = malloc(sizeof(idTrie)); memset(returnTrie->idLeafs, 0, 10 * sizeof(idTrie *)); return returnTrie; } // Função que procura o id na lista unsigned char findId(State *s) { // Apontamos para a mainTrie idTrie *tempTrie = mainId; unsigned short tempValue = 0; unsigned char found = 0; // Para cada caixa: for (short i = 0; i < s->boxes; i++) { if (s->posBoxes[i] > 100) { #pragma critical(part1) { if (!tempTrie->idLeafs[s->posBoxes[i] / 100]) { tempTrie->idLeafs[s->posBoxes[i] / 100] = new_trie(); found = 1; } tempTrie = tempTrie->idLeafs[s->posBoxes[i] / 100]; } } tempValue = (s->posBoxes[i] / 10); #pragma critical(part2) { if (!tempTrie->idLeafs[tempValue % 10]) { tempTrie->idLeafs[tempValue % 10] = new_trie(); found = 1; } tempTrie = tempTrie->idLeafs[tempValue % 10]; } #pragma critical(part3) { if (!tempTrie->idLeafs[s->posBoxes[i] - tempValue * 10]) { tempTrie->idLeafs[s->posBoxes[i] - tempValue * 10] = new_trie(); found = 1; } tempTrie = tempTrie->idLeafs[s->posBoxes[i] - tempValue * 10]; } } #pragma critical(part4) { if (s->posPlayer > 100) { if (!tempTrie->idLeafs[s->posPlayer / 100]) { tempTrie->idLeafs[s->posPlayer / 100] = new_trie(); found = 1; } tempTrie = tempTrie->idLeafs[s->posPlayer / 100]; } tempValue = (s->posPlayer / 10); } #pragma critical(part5) { if (!tempTrie->idLeafs[tempValue % 10]) { tempTrie->idLeafs[tempValue % 10] = new_trie(); found = 1; } tempTrie = tempTrie->idLeafs[tempValue % 10]; } #pragma critical(part6) { if (!tempTrie->idLeafs[s->posPlayer - tempValue * 10]) { tempTrie->idLeafs[s->posPlayer - tempValue * 10] = new_trie(); found = 1; } } return found; } //------------------------------------------------------------------- // Função de Hash para pegar o ID do Estado unsigned char getStateId(State *s) { // Fazemos um sort pois a ordem das caixas não pode importar quickSort(s->posBoxes, 0, s->boxes - 1); /* Procuramos o ID na trie. Se estiver, retornamos verdadeiro, se não estiver o colocamos nela. */ unsigned char newId; newId = findId(s); return newId; } // Função que verifica se o estado é final // Dado que este algoritmo foi implementado possuindo os nívels -1, 00 e 01 em // mente, este não está preparado para níveis que possuam mais caixas que // objetivos unsigned char isFinal(State *s) { if (s->boxes == s->boxesOnGoals) { return 1; } return 0; } // Função que usamos para inserir o estado unsigned char insertState(State *root, State *s, State **lastThreadState) { if (isFinal(s)) { //É final return 1; } // Lista está vazia ou só possui o root. if (root->nextState == NULL) { // Criamos um novo espaço após root root->nextState = malloc(sizeof(State)); // Copiamos o estado copyState(s, root->nextState); // Last aponta para o último estado. Este last pode ser o da lista // principal, ou do thread (*lastThreadState) = root->nextState; return 0; } // A lista possui mais de um, e podemos usar seguramente o last (*lastThreadState)->nextState = malloc(sizeof(State)); copyState(s, (*lastThreadState)->nextState); // Mudamos a posição do último estado. 
*lastThreadState = (*lastThreadState)->nextState; (*lastThreadState)->nextState = NULL; return 0; } // Função que move uma das listas, enquanto cria a raiz para a outra void popState(State **from, State **to) { // Se ambos são o mesmo, devemos fazer uma operação de retirar um nó, // somente if (*to == *from) { State *freeableState = *to; *from = (*from)->nextState; free(freeableState); return; } // Ambos são diferentes, então é o thread solicitando da lista principal // Limpamos o que há no thread free(*to); // Thread recebe o primeiro valor da lista principal *to = *from; // Lista principal anda em um passo *from = (*from)->nextState; // Limpamos o próximo estado no thread, de forma que este não esteja // conectado com a lista principal. (*to)->nextState = NULL; } // Fazemos merge entre as duas listas, conectando o final da main com o começo // da thread /* mainLast threadroot ---------- ---------- | | nextState | | | |------------->| | | | | | ---------- ---------- */ void mergeLinkedLists(State **threadRoot, State **lastThreadState, State **mainRoot, State **mainLast) { // O último estado da lista principal recebe o primeiro estado do thread if ((*mainRoot) == NULL) { *mainRoot = *threadRoot; } (*mainLast)->nextState = (*threadRoot); *mainLast = *lastThreadState; *threadRoot = malloc(sizeof(State)); *lastThreadState = NULL; } int main(int argc, char *argv[]) { struct timespec before, after; time_t nSeconds; // Começamos a contagem de tempo. clock_gettime(CLOCK_REALTIME, &before); // Começamos um contador para a lista principal unsigned int mainStates = 1; // Criamos espaço para uma variável compartilhada que verifica se foi // encontrado uma solução por algum dos threads unsigned char *solution = malloc(1); *solution = 0; // Criamos espaço para a raiz da lista principal State *root = malloc(sizeof(State)); root->nextState = NULL; // Criamos um ponteiro temporário que irá ser movido State *s = malloc(sizeof(State)); // Ponteiro para o último estado principal é inicializado. lastMainState = malloc(sizeof(State *)); *lastMainState = NULL; // Ponteiro para a raiz da trie de Ids mainId = malloc(sizeof(idTrie)); memset(mainId->idLeafs, 0, 10 * sizeof(idTrie *)); // Constroi o primeiro estado, sequencialmente buildMap(root, argv[1]); getStateId(root); // Quantidade de threads solicitados int threads = strtol(argv[2], NULL, 10); // Pediremos que main faça NUM_MAIN_STATES estados para cada thread unsigned int numStates = NUM_MAIN_STATES * threads; while (mainStates < numStates) { for (int i = 0; i < 4; i++) { // Pra cada direção, nós copiamos o estado inicial copyState(root, s); if (movePlayer(s, i, getStateId) != 0) { /*movePlayer retorna 0 se não foi possível mover, seja por uma caixa sendo empurrada numa parede, seja por estarmos andando de cara na parede*/ mainStates++; if (insertState(root, s, lastMainState)) { // Se ele entrou aqui, quer dizer que, durante a inserção, // foi notado que ele é um estado final. printPath(s); *solution = 1; // Finalizamos a contagem de tempo. clock_gettime(CLOCK_REALTIME, &after); // Calcula o tempo passado em microssegundos. nSeconds = after.tv_nsec - before.tv_nsec + (after.tv_sec - before.tv_sec) * NANOS; printf("Achei sem threads: %lu ns - %lf s\n", nSeconds, (double)nSeconds / NANOS); return 0; } } } // Movemos root, colocando root como próximo estado popState(&root, &root); } // Chegando aqui, temos uma lista ligada à root com n<=4 estados. /* A estratégia aqui é: criar n threads, e sequencialmente cada um pega um estado da lista para si. 
Abriremos estes estados, agora paralelamente, em cada thread, criando uma lista ligada parcial. Cada thread procedirá para criar SIZE_THREAD_LIST estados, e então conectá-lo á lista principal. */ // root, lastMainState e solution serão compartilhados, todo resto é // declarado internamente e portanto, são privados. #pragma omp parallel num_threads(threads) shared(root, lastMainState, solution) { // threadRoot será a raiz da lista ligada temporária de cada thread State *threadRoot = NULL; // Estado para ser movido State *s; // Variável de condição que nos diz se devemos pegar um estado da lista // principal ou não unsigned char popMainList = 1; // Quantidade de estados ativos no thread unsigned int activeThreadStates = 0; // Criamos espaço para o estado temporário móvel s = malloc(sizeof(State)); // Criamos espaço para o ponteiro para o último estado presente neste // thread State **lastThreadState; lastThreadState = malloc(sizeof(State *)); (*lastThreadState) = NULL; // Enquanto não foi encontrado uma solução por nenhum thread while (!(*solution)) { // Se a variável de condição foi 1, devemos pegar um estado da lista // principal. Isto só acontecerá caso chegamos no limite estipulado // para cada thread, ou caso esta seja a primeira iteração de cada // thread if (popMainList) { // Esta região deve ser crítica, pois estamos mexendo com a // lista principal (e portante shared) #pragma omp critical(popMerge) popState(&root, &threadRoot); // Limpamos o popMainList popMainList = 0; } // Pra cada direção, iremos mover o estado, e depois adicionar na // nossa lista temporária. for (int i = 0; i < 4 && !(*solution); i++) { copyState(threadRoot, s); if (movePlayer(s, i, getStateId) != 0) { // Entrou aqui, quer dizer que ele conseguiu se mover, ou // seja, era um movimento válido. activeThreadStates++; if (insertState(threadRoot, s, lastThreadState)) { // Entrou aqui quer dizer que o estado era final, de // acordo com a definição de estado final. printPath(s); *solution = 1; } } } // Chegado aqui, exploramos as quatro direções. // Tentaremos criar uma lista de pelo menos SIZE_THREAD_LIST // elementos antes de adicionar à lista principal. Caso não // conseguimos estados suficientes, activeThreadStates = -1, todos // os nós que pegamos eram inúteis. Isso significa que precisamos // pegar novos nós da lista principal if (activeThreadStates < SIZE_THREAD_LIST && activeThreadStates > 0) { // Desempilhamos mais um, agora da nossa lista temporária, pois // não passamos da quantidade necessária popState(&threadRoot, &threadRoot); activeThreadStates--; } else { if (activeThreadStates > 0 && !(*solution)) { // há lista para empilhar // Como no pop acima, esta região é critica (e de mesmo nome // do pop) pois mexe com a lista principal #pragma omp critical(popMerge) mergeLinkedLists(&threadRoot, lastThreadState, &root, lastMainState); // Não há mais estados ativos no thread activeThreadStates = 0; } /*if*/ // Ordenamos que se retire da lista principal mais um nó para // ser expandido popMainList = 1; } /*else*/ } /*while*/ } /*pragma*/ // Finalizamos a contagem de tempo. clock_gettime(CLOCK_REALTIME, &after); // Calcula o tempo passado em microssegundos. nSeconds = after.tv_nsec - before.tv_nsec + (after.tv_sec - before.tv_sec) * NANOS; printf("Tempo total: %lu ns - %lf s\n", nSeconds, (double)nSeconds / NANOS); return 0; }
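/* A self-contained sketch of the digit-trie deduplication used by findId()
   above: a key is split into decimal digits and walked through a 10-way trie;
   the walk reports 1 the first time a key is seen (a node had to be created)
   and 0 when the whole path already existed. trie_seen_first() and the fixed
   three-digit keys are illustrative assumptions, not part of the solver. */
typedef struct digitTrie {
    struct digitTrie *leafs[10];
} digitTrie;

unsigned char trie_seen_first(digitTrie *node, unsigned key) {
    unsigned char created = 0;
    unsigned digits[3] = { (key / 100) % 10, (key / 10) % 10, key % 10 };
    for (int i = 0; i < 3; i++) {
        if (!node->leafs[digits[i]]) {
            node->leafs[digits[i]] = calloc(1, sizeof(digitTrie));
            created = 1;
        }
        node = node->leafs[digits[i]];
    }
    return created; /* 1 = new key, 0 = duplicate */
}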
ast-dump-openmp-target-teams.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(void) { #pragma omp target teams ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams.c:3:1, line:6:1> line:3:6 test 'void (void)' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:6:1> // CHECK-NEXT: `-OMPTargetTeamsDirective {{.*}} <line:4:1, col:25> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CapturedStmt {{.*}} <col:3> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <col:3> // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-NullStmt {{.*}} <col:3> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams.c:4:1) *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams.c:4:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-NullStmt {{.*}} <line:5:3> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams.c:4:1) *const restrict' // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams.c:4:1) *const restrict' // CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-NullStmt {{.*}} <col:3> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams.c:4:1) *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams.c:4:1) *const restrict' // CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-NullStmt {{.*}} <line:5:3> // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams.c:4:1) *const restrict'
mandel-omp-for-row.c
/* * Sequential Mandelbrot program * * This program computes and displays all or part of the Mandelbrot * set. By default, it examines all points in the complex plane * that have both real and imaginary parts between -2 and 2. * Command-line parameters allow zooming in on a specific part of * this range. * * Usage: * mandel [-i maxiter -c x0 y0 -s size -w windowsize] * where * maxiter denotes the maximum number of iterations at each point -- by default 1000 * x0, y0, and size specify the range to examine (a square * centered at (x0 + iy0) of size 2*size by 2*size -- by default, * a square of size 4 by 4 centered at the origin) * windowsize denotes the size of the image (display window) to compute * * Input: none, except the optional command-line arguments * Output: a graphical display as described in Wilkinson & Allen, * displayed using the X Window system, plus text output to * standard output showing the above parameters, plus execution * time in seconds. * * Code based on the original code from Web site for Wilkinson and Allen's * text on parallel programming: * http://www.cs.uncc.edu/~abw/parallel/par_prog/ * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <unistd.h> #include <malloc.h> #if _DISPLAY_ #include <X11/Xlib.h> #include <X11/Xutil.h> #include <X11/Xos.h> #endif #include <sys/time.h> double getusec_() { struct timeval time; gettimeofday(&time, NULL); return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec); } #define START_COUNT_TIME stamp = getusec_(); #define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\ stamp = stamp/1e6;\ printf ("%s: %0.6fs\n",(_m), stamp); /* Default values for things. */ #define N 2 /* size of problem space (x, y from -N to N) */ #define NPIXELS 800 /* size of display window in pixels */ int row, col; // variables used to traverse the problem space /* Structure definition for complex numbers */ typedef struct { double real, imag; } complex; #if _DISPLAY_ /* Functions for GUI */ #include "mandelbrot-gui.h" /* has setup(), interact() */ #endif void mandelbrot(int height, int width, double real_min, double imag_min, double scale_real, double scale_imag, int maxiter, #if _DISPLAY_ int setup_return, Display *display, Window win, GC gc, double scale_color, double min_color) #else int ** output) #endif { /* Calculate points and save/display */ //#pragma omp for schedule(runtime) #pragma omp parallel for schedule(runtime) private(row,col) for (row = 0; row < height; ++row) { for (col = 0; col < width; ++col) { { complex z, c; z.real = z.imag = 0; /* Scale display coordinates to actual region */ c.real = real_min + ((double) col * scale_real); c.imag = imag_min + ((double) (height-1-row) * scale_imag); /* height-1-row so y axis displays * with larger values at top */ /* Calculate z0, z1, ....
until divergence or maximum iterations */ int k = 0; double lengthsq, temp; do { temp = z.real*z.real - z.imag*z.imag + c.real; z.imag = 2*z.real*z.imag + c.imag; z.real = temp; lengthsq = z.real*z.real + z.imag*z.imag; ++k; } while (lengthsq < (N*N) && k < maxiter); #if _DISPLAY_ /* Scale color and display point */ long color = (long) ((k-1) * scale_color) + min_color; if (setup_return == EXIT_SUCCESS) { #pragma omp critical { XSetForeground (display, gc, color); XDrawPoint (display, win, gc, col, row); } } #else output[row][col]=k; #endif } } } } int main(int argc, char *argv[]) { int maxiter = 1000; double real_min; double real_max; double imag_min; double imag_max; int width = NPIXELS; /* dimensions of display window */ int height = NPIXELS; double size=N, x0 = 0, y0 = 0; #if _DISPLAY_ Display *display; Window win; GC gc; int setup_return; long min_color = 0, max_color = 0; double scale_color; #else int ** output; FILE *fp = NULL; #endif double scale_real, scale_imag; /* Process command-line arguments */ for (int i=1; i<argc; i++) { if (strcmp(argv[i], "-i")==0) { maxiter = atoi(argv[++i]); } else if (strcmp(argv[i], "-w")==0) { width = atoi(argv[++i]); height = width; } else if (strcmp(argv[i], "-s")==0) { size = atof(argv[++i]); } #if !_DISPLAY_ else if (strcmp(argv[i], "-o")==0) { if((fp=fopen("mandel.out", "wb"))==NULL) { fprintf(stderr, "Unable to open file\n"); return EXIT_FAILURE; } } #endif else if (strcmp(argv[i], "-c")==0) { x0 = atof(argv[++i]); y0 = atof(argv[++i]); } else { #if _DISPLAY_ fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]); #else fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]); fprintf(stderr, " -o to write computed image to disk (default no file generated)\n"); #endif fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n"); #if _DISPLAY_ fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n"); #else fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n"); #endif fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n"); fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. 
size 4 by 4)\n"); return EXIT_FAILURE; } } real_min = x0 - size; real_max = x0 + size; imag_min = y0 - size; imag_max = y0 + size; /* Produce text output */ fprintf(stdout, "\n"); fprintf(stdout, "Mandelbrot program\n"); fprintf(stdout, "center = (%g, %g), size = %g\n", (real_max + real_min)/2, (imag_max + imag_min)/2, (real_max - real_min)/2); fprintf(stdout, "maximum iterations = %d\n", maxiter); fprintf(stdout, "\n"); #if _DISPLAY_ /* Initialize for graphical display */ setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color); if (setup_return != EXIT_SUCCESS) { fprintf(stderr, "Unable to initialize display, continuing\n"); return EXIT_FAILURE; } #else output = malloc(height*sizeof(int *)); for (int row = 0; row < height; ++row) output[row] = malloc(width*sizeof(int)); #endif /* Compute factors to scale computational region to window */ scale_real = (double) (real_max - real_min) / (double) width; scale_imag = (double) (imag_max - imag_min) / (double) height; #if _DISPLAY_ /* Compute factor for color scaling */ scale_color = (double) (max_color - min_color) / (double) (maxiter - 1); #endif /* Start timing */ double stamp; START_COUNT_TIME; #if _DISPLAY_ mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, setup_return, display, win, gc, scale_color, min_color); #else mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, output); #endif /* End timing */ STOP_COUNT_TIME("Total execution time"); /* Be sure all output is written */ #if _DISPLAY_ if (setup_return == EXIT_SUCCESS) { XFlush (display); } #else if (fp != NULL) { for (int row = 0; row < height; ++row) if(fwrite(output[row], sizeof(int), width, fp) != width) { fprintf(stderr, "Output file not written correctly\n"); } } #endif #if _DISPLAY_ /* Wait for user response, then exit program */ if (setup_return == EXIT_SUCCESS) { interact(display, &win, width, height, real_min, real_max, imag_min, imag_max); } return EXIT_SUCCESS; #endif }
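/* The kernel above uses schedule(runtime): the schedule is picked when the
 * program runs, either through the OMP_SCHEDULE environment variable (for
 * example OMP_SCHEDULE="dynamic,4") or through the API, as sketched below.
 * run_rows_with_dynamic_schedule() is illustrative only; work() stands in for
 * the per-row computation. */
#include <omp.h>

void run_rows_with_dynamic_schedule(int height, void (*work)(int))
{
    /* Equivalent to exporting OMP_SCHEDULE="dynamic,4" before the run. */
    omp_set_schedule(omp_sched_dynamic, 4);
    #pragma omp parallel for schedule(runtime)
    for (int row = 0; row < height; ++row)
        work(row);  /* rows near the set take far longer, so dynamic helps */
}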
remarks_parallel_in_target_state_machine.c
// RUN: %clang_cc1 -verify=host -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // host-no-diagnostics void baz(void) __attribute__((assume("omp_no_openmp"))); void bar(void) { #pragma omp parallel // #1 \ // expected-remark@#1 {{Parallel region is used in unknown ways. Will not attempt to rewrite the state machine. [OMP101]}} { } } void foo(void) { #pragma omp target teams // #2 // expected-remark@#2 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}} { baz(); // expected-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}} #pragma omp parallel { } bar(); #pragma omp parallel { } } } void spmd(void) { // Verify we do not emit the remarks above for "SPMD" regions. #pragma omp target teams #pragma omp parallel { } #pragma omp target teams distribute parallel for for (int i = 0; i < 100; ++i) { } } // expected-remark@* {{OpenMP runtime call __kmpc_global_thread_num deduplicated. [OMP170]}}
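// The OMP121 remark above names its own fix: adding the assumption attribute
// tells the OpenMP optimizer the callee is SPMD-amenable. A sketch of that
// annotation, mirroring the remark text; baz_spmd is a hypothetical sibling
// of baz and is not referenced by the test.
void baz_spmd(void) __attribute__((assume("ompx_spmd_amenable")));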
main.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "eval.h"

typedef struct {
    int score;
    int id;
} eval_score_t;

// Sort descending by score.
int compare_score(const void *a, const void *b) {
    return ((eval_score_t*)b)->score - ((eval_score_t*)a)->score;
}

#define PNUM 32   // population size
#define TNUM 4    // survivors kept per generation

int main(int argc, char *argv[]) {
    int *dna_a[PNUM];   // current generation
    int *dna_b[PNUM];   // next generation
    int *dna_t[TNUM];   // scratch copies of the survivors
    for(int p = 0; p < TNUM; p++)
        dna_t[p] = (int *)malloc(SIZE * SIZE * sizeof(int));
    for(int p = 0; p < PNUM; p++) {
        dna_a[p] = (int *)malloc(SIZE * SIZE * sizeof(int));
        dna_b[p] = (int *)malloc(SIZE * SIZE * sizeof(int));
        int *dna = dna_a[p];
        for(int i = 0; i < SIZE * SIZE; i++)
            dna[i] = ((rand() % atoi(argv[1])) == 1) ? 1 : 0;
    }
    int gen = 0;
    p("Gen 0 generate complete\n");
    while(1) {
        eval_score_t es[PNUM];
        // Evaluate the whole population in parallel.
        #pragma omp parallel for
        for(int p = 0; p < PNUM; p++) {
            int *dna = dna_a[p];
            #pragma omp critical
            p("Gen %d id %d start\n", gen, p);
            es[p].score = eval(dna);
            es[p].id = p;
            #pragma omp critical
            p("Gen %d id %d finished : score %d\n", gen, p, es[p].score);
        }
        qsort(es, PNUM, sizeof(eval_score_t), compare_score);
        // Keep copies of the TNUM best individuals.
        for(int i = 0; i < TNUM; i++)
            memcpy(dna_t[i], dna_a[es[i].id], SIZE * SIZE * sizeof(int));
        p("*** Gen %d : score %d (id=%d)***\n", gen, es[0].score, es[0].id);
        // Dump the best individual of this generation to a file.
        char s[16];
        sprintf(s, PATHFMT, gen);
        FILE *fp = fopen(s, "wt");
        int *dna_x = dna_a[es[0].id];
        for(int i = 0; i < SIZE * SIZE; i++) {
            fputc(" *"[dna_x[i]], fp);
            if((i % SIZE) == SIZE - 1)
                fputc('\n', fp);
        }
        fclose(fp);
        gen++;
        // Elitism: the survivors pass into the next generation unchanged.
        for(int i = 0; i < TNUM; i++)
            memcpy(dna_b[i], dna_t[i], SIZE * SIZE * sizeof(int));
        // Uniform crossover between every ordered pair of survivors.
        for(int i = 0; i < TNUM; i++) {
            for(int j = 0; j < TNUM; j++) {
                int *dna_pa = dna_t[i];
                int *dna_pb = dna_t[j];
                for(int c = 0; c < SIZE * SIZE; c++) {
                    dna_b[TNUM+i*TNUM+j][c] = rand() & 1 ? dna_pa[c] : dna_pb[c];
                }
            }
        }
        // Fill the rest of the next generation with mutated copies of random
        // individuals (roughly 1% of the genes are flipped).
        int last = PNUM - (TNUM+TNUM*TNUM);
        int lhead = TNUM+TNUM*TNUM;
        for(int p = 0; p < last; p++) {
            int ch = rand() % PNUM;
            int dna_px[SIZE * SIZE];
            memcpy(dna_px, dna_a[ch], SIZE * SIZE * sizeof(int));
            for(int i = 0; i < SIZE * SIZE; i++) {
                if((rand() % 1000) < 10) {
                    dna_px[i] = rand() & 1;
                }
            }
            memcpy(dna_b[lhead+p], dna_px, SIZE * SIZE * sizeof(int));
        }
        // The next generation becomes the current one.
        for(int i = 0; i < PNUM; i++)
            memcpy(dna_a[i], dna_b[i], SIZE * SIZE * sizeof(int));
    }
    return 0;
}
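/* rand() above is only called from serial code, which keeps it safe; if the
 * mutation loop ever moved inside the parallel for, rand() would race, since
 * it is not required to be thread-safe. A common OpenMP pattern is per-thread
 * rand_r() state, sketched here; mutate_parallel and the fixed seed are
 * illustrative assumptions, not part of this program. */
void mutate_parallel(int *genes, int n)
{
    #pragma omp parallel
    {
        unsigned seed = 12345u + (unsigned)omp_get_thread_num(); /* private state */
        #pragma omp for
        for(int i = 0; i < n; i++) {
            if((rand_r(&seed) % 1000) < 10)  /* ~1% rate, matching the code above */
                genes[i] = rand_r(&seed) & 1;
        }
    }
}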
vect-simd-clone-12a.c
/* { dg-do compile } */ #include "vect-simd-clone-10.h" #pragma omp declare simd notinbranch __attribute__((noinline)) int foo (long int a, int b, int c) { return a + b + c; } #pragma omp declare simd notinbranch __attribute__((noinline)) long int bar (int a, int b, long int c) { return a + b + c; }
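/* A minimal sketch of how these clones get used: when the loop below is
   vectorized, the compiler may call the SIMD clone of foo declared above
   instead of the scalar body. call_foo is illustrative and not part of the
   original test. */
void
call_foo (int *__restrict out, long int *__restrict a, int n)
{
  #pragma omp simd
  for (int i = 0; i < n; i++)
    out[i] = foo (a[i], i, 42);
}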
convolution_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int32x4_t _sum0 = vdupq_n_s32(0); const signed char* kptr = weight_data_int8.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { int8x8_t _val = vdup_n_s8(sptr[space_ofs[k]]); int8x8_t _w = vld1_s8(kptr); int16x8_t _s0 = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); kptr += 8; } } vst1q_s32(outptr + j * 4, _sum0); } outptr += outw * 4; } } }
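// A scalar sketch of the space_ofs trick used above: the (dilation, row-gap)
// walk over the kernel window is flattened once into precomputed input
// offsets, so the hot loop is a single indexed gather. Plain arrays stand in
// for ncnn's Mat here; this helper is illustrative, not part of the library.
static inline float convolve_one_tap_list(const float* in_topleft,
                                          const float* weights,
                                          const int* space_ofs, int maxk)
{
    float sum = 0.f;
    for (int k = 0; k < maxk; k++)
        sum += in_topleft[space_ofs[k]] * weights[k]; // tap fetched via offset
    return sum;
}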
cv_utils.h
/*! ***************************************************************************** * \file cv_utils.h * \author moennen * \brief * \date 2018-03-16 * *****************************************************************************/ #ifndef _UTILS_CV_UTILS_H #define _UTILS_CV_UTILS_H #include "utils/Hop.h" #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui.hpp> #include <opencv2/imgproc.hpp> #include <glm/glm.hpp> #include <iostream> namespace cv_utils { inline float toLinear( float c ) { return c <= 0.04045f ? c / 12.92f : std::pow( ( c + 0.055f ) / 1.055f, 2.4f ); } inline float toLog( float c ) { return c <= 0.0031308f ? c * 12.92f : 1.055f * std::pow( c, 1.0f / 2.4f ) - 0.055f; } inline void normalizeMeanStd( cv::Mat& img ) { cv::Mat mean, std; cv::meanStdDev( img, mean, std ); img = ( img - mean ) / std; } inline void imToLinear( cv::Mat& img ) { HOP_PROF_FUNC(); const size_t rowSz = img.cols * 3; #pragma omp parallel for for ( size_t y = 0; y < img.rows; y++ ) { float* row_data = img.ptr<float>( y ); for ( size_t x = 0; x < rowSz; x++ ) { row_data[x] = toLinear( row_data[x] ); } } } inline void imToLog( cv::Mat& img ) { const size_t rowSz = img.cols * 3; #pragma omp parallel for for ( size_t y = 0; y < img.rows; y++ ) { float* row_data = img.ptr<float>( y ); for ( size_t x = 0; x < rowSz; x++ ) { row_data[x] = toLog( row_data[x] ); } } } template <class T> inline void adjustContrastBrightness( cv::Mat& img, const float alpha, const float beta ) { #pragma omp parallel for for ( size_t y = 0; y < img.rows; y++ ) { T* row_data = img.ptr<T>( y ); for ( size_t x = 0; x < img.cols; x++ ) { row_data[x] = clamp( alpha * row_data[x] + beta, T( 0.0f ), T( 1.0f ) ); } } } inline void resizeTo( cv::Mat& img, const glm::uvec2 sampleSz ) { glm::uvec2 imgSz( img.cols, img.rows ); const float ds = std::max( (float)sampleSz.y / imgSz.y, (float)sampleSz.x / imgSz.x ); cv::resize( img, img, cv::Size(), ds, ds, ds < 1.0 ? CV_INTER_AREA : CV_INTER_LINEAR ); imgSz = glm::uvec2( img.cols, img.rows ); const glm::uvec2 off( ( imgSz.x - sampleSz.x ) / 2, ( imgSz.y - sampleSz.y ) / 2 ); img = img( cv::Rect( off.x, off.y, sampleSz.x, sampleSz.y ) ).clone(); } inline void toCHW32F( cv::Mat& in ) { std::vector<cv::Mat> in_chns( in.channels() ); cv::split( in, &in_chns[0] ); float* inPtr = (float*)in.data; size_t off = in.rows * in.cols; #pragma omp parallel for for ( int c = 0; c < in.channels(); ++c ) { std::memcpy( inPtr + c * off, in_chns[c].data, sizeof( float ) * off ); } } inline cv::Mat imread32FC1( const std::string& imgPath, const float smax = 255.0 ) { HOP_PROF_FUNC(); cv::Mat img; { HOP_PROF( "cv_imread_c1" ); img = cv::imread( imgPath, cv::IMREAD_UNCHANGED ); } if ( !img.data || ( img.channels() > 1 ) ) { std::cerr << "ERROR loading c1 image : " << imgPath << std::endl; return cv::Mat(); } if ( img.type() != CV_32F ) { HOP_PROF( "cv_convert" ); img.convertTo( img, CV_32F ); img /= smax; } return img; } cv::Mat convert8UC3ToLinear32FC3( cv::Mat& img ); inline cv::Mat imread32FC3( const std::string& imgPath, bool toLinear = false, bool toRGB = false, const float smax = 255.0 ) { HOP_PROF_FUNC(); cv::Mat img; { HOP_PROF( "cv_imread" ); img = cv::imread( imgPath, cv::IMREAD_UNCHANGED ); } if ( !img.data || ( img.channels() == 2 ) || ( img.channels() > 4 ) ) { std::cerr << "ERROR loading image : " << imgPath << std::endl; return cv::Mat(); } if ( img.channels() == 1 ) cv::cvtColor( img, img, toRGB ? 
cv::COLOR_GRAY2BGR : cv::COLOR_GRAY2RGB ); else if ( img.channels() == 4 ) cv::cvtColor( img, img, toRGB ? cv::COLOR_RGBA2BGR : cv::COLOR_RGBA2RGB ); else if ( toRGB ) cv::cvtColor( img, img, cv::COLOR_BGR2RGB ); if ( ( img.type() == CV_8UC3 ) && toLinear ) { img = convert8UC3ToLinear32FC3( img ); } else { if ( img.depth() != CV_32F ) { HOP_PROF( "cv_convert" ); img.convertTo( img, CV_32F ); img /= smax; } if ( toLinear ) imToLinear( img ); } return img; } inline cv::Mat imread32FC4( const std::string& imgPath, bool toLinear = false, bool toRGB = true, const float smax = 255.0 ) { HOP_PROF_FUNC(); cv::Mat img; { HOP_PROF( "cv_imread" ); img = cv::imread( imgPath, cv::IMREAD_UNCHANGED ); } if ( !img.data || ( img.channels() == 2 ) || ( img.channels() > 4 ) ) { std::cerr << "ERROR loading image : " << imgPath << std::endl; return cv::Mat(); } if ( img.channels() == 1 ) cv::cvtColor( img, img, toRGB ? cv::COLOR_GRAY2BGRA : cv::COLOR_GRAY2RGBA ); else if ( img.channels() == 3 ) cv::cvtColor( img, img, toRGB ? cv::COLOR_RGB2BGRA : cv::COLOR_RGB2RGBA ); else if ( toRGB ) cv::cvtColor( img, img, cv::COLOR_BGRA2RGBA ); if ( img.depth() != CV_32F ) { HOP_PROF( "cv_convert" ); img.convertTo( img, CV_32F ); img /= smax; } if ( toLinear ) imToLinear( img ); return img; } template <class TVec3> inline TVec3 imsample32FC3( const cv::Mat& img, const glm::vec2& in_pt ) { // compute the positions const glm::vec2 max_pt( img.cols - 1, img.rows - 1 ); const glm::vec2 pt = glm::clamp( in_pt, glm::vec2( 0.0 ), max_pt ); const glm::ivec2 ul_pt( static_cast<int>( std::floor( pt.x ) ), static_cast<int>( std::floor( pt.y ) ) ); // fetch the data const float* u_row = img.ptr<float>( ul_pt.y ) + 3 * ul_pt.x; const glm::vec3 ul( u_row[0], u_row[1], u_row[2] ); const glm::vec3 ur = ul_pt.x < max_pt.x ? glm::vec3( u_row[3], u_row[4], u_row[5] ) : ul; const float* b_row = ul_pt.y < max_pt.y ? img.ptr<float>( ul_pt.y + 1 ) + 3 * ul_pt.x : u_row; const glm::vec3 bl( b_row[0], b_row[1], b_row[2] ); const glm::vec3 br = ul_pt.x < max_pt.x ? glm::vec3( b_row[3], b_row[4], b_row[5] ) : bl; // linear interpolation const glm::vec3 bgr = glm::mix( glm::mix( ul, ur, pt.x - ul_pt.x ), glm::mix( bl, br, pt.x - ul_pt.x ), pt.y - ul_pt.y ); return TVec3( bgr.x, bgr.y, bgr.z ); } template <class TVec> inline TVec imsample32F( const cv::Mat& img, const glm::vec2& in_pt ) { // compute the positions const glm::vec2 max_pt( img.cols - 1, img.rows - 1 ); const glm::vec2 pt = glm::clamp( in_pt, glm::vec2( 0.0 ), max_pt ); const glm::ivec2 ul_pt( static_cast<int>( std::floor( pt.x ) ), static_cast<int>( std::floor( pt.y ) ) ); // fetch the data const TVec& ul = img.ptr<TVec>( ul_pt.y )[ul_pt.x]; const TVec& ur = ul_pt.x < max_pt.x ? img.ptr<TVec>( ul_pt.y )[ul_pt.x + 1] : ul; const TVec& bl = ul_pt.y < max_pt.y ? img.ptr<TVec>( ul_pt.y + 1 )[ul_pt.x] : ul; const TVec& br = ul_pt.y < max_pt.y ? ( ul_pt.x < max_pt.x ? img.ptr<TVec>( ul_pt.y + 1 )[ul_pt.x + 1] : bl ) : ur; // linear interpolation return glm::mix( glm::mix( ul, ur, pt.x - ul_pt.x ), glm::mix( bl, br, pt.x - ul_pt.x ), pt.y - ul_pt.y ); } template <class TVec> inline TVec imnearest32F( const cv::Mat& img, const glm::vec2& in_pt ) { // compute the positions const glm::ivec2 max_pt( img.cols - 1, img.rows - 1 ); const glm::ivec2 b_pt( static_cast<int>( std::floor( in_pt.x ) ), static_cast<int>( std::floor( in_pt.y ) ) ); glm::ivec2 nearest_pt( ( in_pt.x - b_pt.x > 0.5 ? b_pt.x + 1 : b_pt.x ), ( in_pt.y - b_pt.y > 0.5 ? 
b_pt.y + 1 : b_pt.y ) ); nearest_pt = glm::clamp( nearest_pt, glm::ivec2( 0 ), max_pt ); return img.ptr<TVec>( nearest_pt.y )[nearest_pt.x]; } inline void imToBuffer( const cv::Mat& img, float* buff, const bool toRGB = false ) { HOP_PROF_FUNC(); const size_t row_stride = img.cols * img.channels(); const size_t row_size = sizeof( float ) * row_stride; #pragma omp parallel for for ( size_t y = 0; y < img.rows; y++ ) { const float* row_img_data = img.ptr<float>( y ); float* row_buff_data = buff + y * row_stride; memcpy( row_buff_data, row_img_data, row_size ); } } inline void fittResizeCrop( cv::Mat& img, const glm::uvec2 sampleSz ) { glm::uvec2 imgSz( img.cols, img.rows ); // rescale const float ds = std::min( (float)sampleSz.y / imgSz.y, (float)sampleSz.x / imgSz.x ); cv::resize( img, img, cv::Size(), ds, ds, CV_INTER_AREA ); imgSz = glm::uvec2( img.cols, img.rows ); // translate const glm::ivec2 trans( std::floor( 0.5 * ( imgSz.x - sampleSz.x ) ), std::floor( 0.5 * ( imgSz.y - sampleSz.y ) ) ); // crop img = img( cv::Rect( trans.x, trans.y, sampleSz.x, sampleSz.y ) ).clone(); } } #endif // _UTILS_CV_UTILS_H
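The toLinear/toLog pair at the top of cv_utils.h is the standard sRGB decode/encode curve. A minimal standalone C sketch of the same two formulas, with an illustrative round-trip check (the function names, step size, and 1e-4 tolerance are assumptions for the demo, not part of cv_utils.h):

#include <assert.h>
#include <math.h>
#include <stdio.h>

/* sRGB-encoded value -> linear light (same formula as cv_utils::toLinear) */
static float srgb_to_linear( float c ) {
   return c <= 0.04045f ? c / 12.92f : powf( ( c + 0.055f ) / 1.055f, 2.4f );
}

/* linear light -> sRGB encoding (same formula as cv_utils::toLog) */
static float linear_to_srgb( float c ) {
   return c <= 0.0031308f ? c * 12.92f : 1.055f * powf( c, 1.0f / 2.4f ) - 0.055f;
}

int main( void ) {
   for ( float c = 0.0f; c <= 1.0f; c += 0.125f ) {  /* 0.125 is exactly representable */
      const float r = linear_to_srgb( srgb_to_linear( c ) );
      printf( "%.3f -> %.6f\n", c, r );
      assert( fabsf( r - c ) < 1e-4f );  /* the two curves are inverses */
   }
   return 0;
}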
GB_unop__cimag_fp64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__cimag_fp64_fc64) // op(A') function: GB (_unop_tran__cimag_fp64_fc64) // C type: double // A type: GxB_FC64_t // cast: GxB_FC64_t cij = (aij) // unaryop: cij = cimag (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cimag (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = (aij) ; \ Cx [pC] = cimag (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CIMAG || GxB_NO_FP64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__cimag_fp64_fc64) ( double *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = cimag (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = cimag (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__cimag_fp64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
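For the non-bitmap case, the generated kernel above boils down to a flat OpenMP loop over the values. A minimal self-contained C99 sketch of that pattern, using complex.h's double complex in place of the GxB_FC64_t typedef (an assumption made purely for illustration):

#include <complex.h>
#include <stdint.h>
#include <stdio.h>

/* Cx [p] = cimag (Ax [p]), the core of GB (_unop_apply__cimag_fp64_fc64) */
static void unop_apply_cimag (double *Cx, const double complex *Ax,
                              int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = cimag (Ax [p]) ;
    }
}

int main (void)
{
    double complex A [3] = { 1.0 + 2.0*I, -3.0, 0.5*I } ;
    double C [3] ;
    unop_apply_cimag (C, A, 3, 2) ;
    printf ("%g %g %g\n", C [0], C [1], C [2]) ;   /* 2 0 0.5 */
    return 0 ;
}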
offloading_success.c
// RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu #include <stdio.h> #include <omp.h> int main(void) { int isHost = -1; #pragma omp target { isHost = omp_is_initial_device(); } if (isHost < 0) { printf("Runtime error, isHost=%d\n", isHost); } // CHECK: Target region executed on the device printf("Target region executed on the %s\n", isHost ? "host" : "device"); return isHost; }
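One portability note on the test above: since OpenMP 4.5, a scalar referenced in a target region is firstprivate by default, so a device write to isHost is not guaranteed to be copied back unless it is mapped explicitly. A hedged variant with an explicit map (an illustration, not the upstream test):

#include <omp.h>
#include <stdio.h>

int main(void) {
  int isHost = -1;

  /* map(tofrom:) makes the copy-back explicit instead of relying on
     implicit data-mapping defaults */
#pragma omp target map(tofrom : isHost)
  { isHost = omp_is_initial_device(); }

  printf("Target region executed on the %s\n", isHost ? "host" : "device");
  return isHost;
}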
SpatialMaxPooling.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialMaxPooling.c" #else static void nn_(SpatialMaxPooling_updateOutput_frame)(real *input_p, real *output_p, real *indx_p, real *indy_p, long nslices, long iwidth, long iheight, long owidth, long oheight, int kW, int kH, int dW, int dH) { long k; #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { /* loop over output */ long i, j; for(i = 0; i < oheight; i++) { for(j = 0; j < owidth; j++) { /* local pointers */ real *ip = input_p + k*iwidth*iheight + i*iwidth*dH + j*dW; real *op = output_p + k*owidth*oheight + i*owidth + j; real *indyp = indy_p + k*owidth*oheight + i*owidth + j; real *indxp = indx_p + k*owidth*oheight + i*owidth + j; /* compute local max: */ long maxindex = -1; real maxval = -THInf; long tcntr = 0; int x,y; for(y = 0; y < kH; y++) { for(x = 0; x < kW; x++) { real val = *(ip + y*iwidth + x); if (val > maxval) { maxval = val; maxindex = tcntr; } tcntr++; } } /* set output to local max */ *op = maxval; /* store location of max (x,y) */ *indyp = (int)(maxindex / kW)+1; *indxp = (maxindex % kW) +1; } } } } static int nn_(SpatialMaxPooling_updateOutput)(lua_State *L) { THTensor *input = luaT_checkudata(L, 2, torch_Tensor); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); int dW = luaT_getfieldcheckint(L, 1, "dW"); int dH = luaT_getfieldcheckint(L, 1, "dH"); THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor); THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor); int dimw = 2; int dimh = 1; long nbatch = 1; long nslices; long iheight; long iwidth; long oheight; long owidth; real *input_data; real *output_data; real *indices_data; luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4 , 2, "3D or 4D (batch mode) tensor expected"); if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } luaL_argcheck(L, input->size[dimw] >= kW && input->size[dimh] >= kH, 2, "input image smaller than kernel size"); /* sizes */ nslices = input->size[dimh-1]; iheight = input->size[dimh]; iwidth = input->size[dimw]; oheight = (iheight - kH) / dH + 1; owidth = (iwidth - kW) / dW + 1; /* get contiguous input */ input = THTensor_(newContiguous)(input); /* resize output */ if (input->nDimension == 3) { THTensor_(resize3d)(output, nslices, oheight, owidth); /* indices will contain i,j locations for each output point */ THTensor_(resize4d)(indices, 2, nslices, oheight, owidth); input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); indices_data = THTensor_(data)(indices); nn_(SpatialMaxPooling_updateOutput_frame)(input_data, output_data, indices_data+nslices*owidth*oheight, indices_data, nslices, iwidth, iheight, owidth, oheight, kW, kH, dW, dH); } else { long p; THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); /* indices will contain i,j locations for each output point */ THTensor_(resize5d)(indices, 2, nbatch, nslices, oheight, owidth); input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); indices_data = THTensor_(data)(indices); #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { nn_(SpatialMaxPooling_updateOutput_frame)(input_data+p*nslices*iwidth*iheight, output_data+p*nslices*owidth*oheight, indices_data+(p+nbatch)*nslices*owidth*oheight, indices_data+p*nslices*owidth*oheight, nslices, iwidth, iheight, owidth, oheight, kW, kH, dW, dH); } } /* cleanup */ THTensor_(free)(input); return 1; } static void nn_(SpatialMaxPooling_updateGradInput_frame)(real 
*gradInput_p, real *gradOutput_p, real *indx_p, real *indy_p, long nslices, long iwidth, long iheight, long owidth, long oheight, int dW, int dH) { long k; #pragma omp parallel for private(k) for (k = 0; k < nslices; k++) { real *gradInput_p_k = gradInput_p + k*iwidth*iheight; real *gradOutput_p_k = gradOutput_p + k*owidth*oheight; real *indx_p_k = indx_p + k*owidth*oheight; real *indy_p_k = indy_p + k*owidth*oheight; /* calculate max points */ long i, j; for(i = 0; i < oheight; i++) { for(j = 0; j < owidth; j++) { /* retrieve position of max */ long maxi = indy_p_k[i*owidth + j] - 1 + i*dH; long maxj = indx_p_k[i*owidth + j] - 1 + j*dW; /* update gradient */ gradInput_p_k[maxi*iwidth + maxj] += gradOutput_p_k[i*owidth + j]; } } } } static int nn_(SpatialMaxPooling_updateGradInput)(lua_State *L) { THTensor *input = luaT_checkudata(L, 2, torch_Tensor); THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor); int dW = luaT_getfieldcheckint(L, 1, "dW"); int dH = luaT_getfieldcheckint(L, 1, "dH"); THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor); THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor); int dimw = 2; int dimh = 1; long nbatch = 1; int nslices; int iheight; int iwidth; int oheight; int owidth; real *gradInput_data; real *gradOutput_data; real *indices_data; /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous)(gradOutput); /* resize */ THTensor_(resizeAs)(gradInput, input); THTensor_(zero)(gradInput); if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } /* sizes */ nslices = input->size[dimh-1]; iheight = input->size[dimh]; iwidth = input->size[dimw]; oheight = gradOutput->size[dimh]; owidth = gradOutput->size[dimw]; /* get raw pointers */ gradInput_data = THTensor_(data)(gradInput); gradOutput_data = THTensor_(data)(gradOutput); indices_data = THTensor_(data)(indices); /* backprop */ if (input->nDimension == 3) { nn_(SpatialMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data, indices_data+nslices*owidth*oheight, indices_data, nslices, iwidth, iheight, owidth, oheight, dW, dH); } else { long p; #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { nn_(SpatialMaxPooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight, indices_data+(p+nbatch)*nslices*owidth*oheight, indices_data+p*nslices*owidth*oheight, nslices, iwidth, iheight, owidth, oheight, dW, dH); } } /* cleanup */ THTensor_(free)(gradOutput); return 1; } static const struct luaL_Reg nn_(SpatialMaxPooling__) [] = { {"SpatialMaxPooling_updateOutput", nn_(SpatialMaxPooling_updateOutput)}, {"SpatialMaxPooling_updateGradInput", nn_(SpatialMaxPooling_updateGradInput)}, {NULL, NULL} }; static void nn_(SpatialMaxPooling_init)(lua_State *L) { luaT_pushmetatable(L, torch_Tensor); luaT_registeratname(L, nn_(SpatialMaxPooling__), "nn"); lua_pop(L,1); } #endif
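The indices tensor above stores, per output pixel, the 1-based (x,y) of the window maximum so the backward pass can route gradients without rescanning the window. A minimal 1D sketch of the same forward/backward pairing in plain C (names and the tiny test are illustrative, not Torch API):

#include <stdio.h>

/* forward: out[i] = max(in[i*d .. i*d+k-1]); idx[i] = winning offset */
static void maxpool1d_fwd(const float *in, float *out, int *idx,
                          int n, int k, int d)
{
  int olen = (n - k) / d + 1;  /* same size formula as the 2D code above */
  for (int i = 0; i < olen; i++) {
    int best = 0;
    for (int x = 1; x < k; x++)
      if (in[i*d + x] > in[i*d + best]) best = x;
    out[i] = in[i*d + best];
    idx[i] = best;
  }
}

/* backward: each output gradient lands on the position that won the max */
static void maxpool1d_bwd(float *gin, const float *gout, const int *idx,
                          int n, int k, int d)
{
  int olen = (n - k) / d + 1;
  for (int i = 0; i < n; i++) gin[i] = 0.0f;
  for (int i = 0; i < olen; i++) gin[i*d + idx[i]] += gout[i];
}

int main(void)
{
  float in[6] = {1, 5, 2, 4, 3, 0}, out[3], gin[6], gout[3] = {1, 1, 1};
  int idx[3];
  maxpool1d_fwd(in, out, idx, 6, 2, 2);   /* out = {5, 4, 3} */
  maxpool1d_bwd(gin, gout, idx, 6, 2, 2);
  for (int i = 0; i < 6; i++) printf("%g ", gin[i]);  /* 0 1 0 1 1 0 */
  printf("\n");
  return 0;
}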
max_threads.c
// RUN: %compile-run-and-check #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int MaxThreadsL1 = -1, MaxThreadsL2 = -1; #pragma omp declare reduction(unique:int \ : omp_out = (omp_in == 1 ? omp_in : omp_out)) \ initializer(omp_priv = -1) // Non-SPMD mode. #pragma omp target teams map(MaxThreadsL1, MaxThreadsL2) thread_limit(32) \ num_teams(1) { MaxThreadsL1 = omp_get_max_threads(); #pragma omp parallel reduction(unique : MaxThreadsL2) { MaxThreadsL2 = omp_get_max_threads(); } } //FIXME: This Non-SPMD kernel will have 32 active threads due to // thread_limit. However, Non-SPMD MaxThreadsL1 is the total number of // threads in block (64 in this case), which translates to worker // threads + WARP_SIZE for Non-SPMD kernels and worker threads for SPMD // kernels. According to the spec, omp_get_max_threads must return the // max active threads possible between the two kernel types. // CHECK: Non-SPMD MaxThreadsL1 = 64 printf("Non-SPMD MaxThreadsL1 = %d\n", MaxThreadsL1); // CHECK: Non-SPMD MaxThreadsL2 = 1 printf("Non-SPMD MaxThreadsL2 = %d\n", MaxThreadsL2); // SPMD mode with full runtime MaxThreadsL2 = -1; #pragma omp target parallel reduction(unique : MaxThreadsL2) { MaxThreadsL2 = omp_get_max_threads(); } // CHECK: SPMD with full runtime MaxThreadsL2 = 1 printf("SPMD with full runtime MaxThreadsL2 = %d\n", MaxThreadsL2); // SPMD mode without runtime MaxThreadsL2 = -1; #pragma omp target parallel for reduction(unique : MaxThreadsL2) for (int I = 0; I < 2; ++I) { MaxThreadsL2 = omp_get_max_threads(); } // CHECK: SPMD without runtime MaxThreadsL2 = 1 printf("SPMD without runtime MaxThreadsL2 = %d\n", MaxThreadsL2); return 0; }
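The test above hinges on a user-defined reduction: the 'unique' combiner keeps a contribution only when it equals 1. A minimal standalone sketch of the same declare reduction construct (the seen variable and the choice of which thread reports 1 are illustrative assumptions):

#include <omp.h>
#include <stdio.h>

#pragma omp declare reduction(unique : int                                     \
    : omp_out = (omp_in == 1 ? omp_in : omp_out))                              \
    initializer(omp_priv = -1)

int main(void) {
  int seen = -1;
#pragma omp parallel num_threads(4) reduction(unique : seen)
  {
    // every thread contributes; the combiner only latches a value of 1
    seen = (omp_get_thread_num() == 1) ? 1 : 0;
  }
  // CHECK-style expectation: seen = 1, because one private copy was 1
  printf("seen = %d\n", seen);
  return 0;
}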
stream_triad_opt2.c
#include <stdio.h> #include <omp.h> #include "timer.h" #define NTIMES 16 // large enough to force into main memory #define STREAM_ARRAY_SIZE 80000000 static double a[STREAM_ARRAY_SIZE], b[STREAM_ARRAY_SIZE], c[STREAM_ARRAY_SIZE]; int main(int argc, char *argv[]){ #pragma omp parallel if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads()); struct timespec tstart; // initializing data and arrays double scalar = 3.0, time_sum = 0.0; #pragma omp parallel for for (int i=0; i<STREAM_ARRAY_SIZE; i++) { a[i] = 1.0; b[i] = 2.0; } for (int k=0; k<NTIMES; k++){ cpu_timer_start(&tstart); // stream triad loop #pragma omp parallel for for (int i=0; i<STREAM_ARRAY_SIZE; i++){ c[i] = a[i] + scalar*b[i]; } time_sum += cpu_timer_stop(tstart); // to keep the compiler from optimizing out the loop c[1]=c[2]; } printf("Average runtime is %lf msecs\n", time_sum/NTIMES); }
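The triad kernel streams two arrays in and one out, so sustained bandwidth follows directly from the timing: bytes moved = 3 * sizeof(double) * N per pass. A small helper with made-up numbers (80M elements and 0.12 s are assumptions for the example, not measured values):

#include <stdio.h>

/* STREAM triad traffic: c[] written, a[] and b[] read = 24 bytes/element */
static double triad_bandwidth_GBs(long n, double seconds) {
   return 3.0 * sizeof(double) * (double)n / seconds / 1.0e9;
}

int main(int argc, char *argv[]){
   /* 3 * 8 * 80e6 bytes in 0.12 s -> 16.00 GB/s */
   printf("%.2f GB/s\n", triad_bandwidth_GBs(80000000L, 0.12));
   return 0;
}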
sparse.c
/* Copyright (c) 2013, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /********************************************************************************* NAME: sparse PURPOSE: This program tests the efficiency with which a sparse matrix vector multiplication is carried out USAGE: The program takes as input the number of threads, the 2log of the linear size of the 2D grid (equalling the 2log of the square root of the order of the sparse matrix), the radius of the difference stencil, and the number of times the matrix-vector multiplication is carried out. <progname> <# threads> <# iterations> <2log root-of-matrix-order> <radius> The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. FUNCTIONS CALLED: Other than OpenMP or standard C functions, the following functions are used in this program: wtime() bail_out() reverse() NOTES: HISTORY: Written by Rob Van der Wijngaart, August 2006. Updated by RvdW to parallelize matrix generation, March 2007. 
Updated by RvdW to fix verification bug, February 2013 Updated by RvdW to sort matrix elements to reflect traditional CSR storage, August 2013 ***********************************************************************************/ #include <par-res-kern_general.h> #include <par-res-kern_omp.h> /* linearize the grid index */ #define LIN(i,j) (i+((j)<<lsize)) /* if the scramble flag is set, convert all (linearized) grid indices by reversing their bits; if not, leave the grid indices alone */ #ifdef SCRAMBLE #define REVERSE(a,b) reverse((a),(b)) #else #define REVERSE(a,b) (a) #endif #define BITS_IN_BYTE 8 static u64Int reverse(register u64Int, int); static int compare(const void *el1, const void *el2); int main(int argc, char **argv){ int iter, r; /* dummies */ int lsize; /* logarithmic linear size of grid */ int lsize2; /* logarithmic size of grid */ int size; /* linear size of grid */ s64Int size2; /* matrix order (=total # points in grid) */ int radius, /* stencil parameters */ stencil_size; s64Int row, col, first, last; /* dummies */ s64Int i, j; /* dummies */ int iterations; /* number of times the multiplication is done */ s64Int elm; /* sequence number of matrix nonzero */ s64Int nent; /* number of nonzero entries */ double sparsity; /* fraction of non-zeroes in matrix */ double sparse_time,/* timing parameters */ avgtime; double * RESTRICT matrix; /* sparse matrix entries */ double * RESTRICT vector; /* vector multiplying the sparse matrix */ double * RESTRICT result; /* computed matrix-vector product */ double temp; /* temporary scalar storing reduction data */ double vector_sum; /* checksum of result */ double reference_sum; /* checksum of "rhs" */ double epsilon = 1.e-8; /* error tolerance */ s64Int * RESTRICT colIndex; /* column indices of sparse matrix entries */ int nthread_input, /* thread parameters */ nthread; int num_error=0; /* flag that signals that requested and obtained numbers of threads are the same */ size_t vector_space, /* variables used to hold malloc sizes */ matrix_space, index_space; if (argc != 5) { printf("Usage: %s <# threads> <# iterations> <2log grid size> <stencil radius>\n",*argv); exit(EXIT_FAILURE); } /* Take number of threads to request from command line */ nthread_input = atoi(*++argv); if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) { printf("ERROR: Invalid number of threads: %d\n", nthread_input); exit(EXIT_FAILURE); } omp_set_num_threads(nthread_input); iterations = atoi(*++argv); if (iterations < 1){ printf("ERROR: Iterations must be positive : %d \n", iterations); exit(EXIT_FAILURE); } lsize = atoi(*++argv); lsize2 = 2*lsize; size = 1<<lsize; if (lsize <0) { printf("ERROR: Log of grid size must be greater than or equal to zero: %d\n", (int) lsize); exit(EXIT_FAILURE); } /* compute number of points in the grid */ size2 = size*size; radius = atoi(*++argv); if (radius <0) { printf("ERROR: Stencil radius must be non-negative: %d\n", (int) radius); exit(EXIT_FAILURE); } /* emit error if (periodic) stencil overlaps with itself */ if (size <2*radius+1) { printf("ERROR: Grid extent %d smaller than stencil diameter 2*%d+1= %d\n", size, radius, radius*2+1); exit(EXIT_FAILURE); } /* compute total size of star stencil in 2D */ stencil_size = 4*radius+1; /* sparsity follows from number of non-zeroes per row */ sparsity = (double)(4*radius+1)/(double)size2; /* compute total number of non-zeroes */ nent = size2*stencil_size; matrix_space = nent*sizeof(double); if (matrix_space/sizeof(double) != nent) { printf("ERROR: Cannot represent space for matrix: 
%ul\n", matrix_space); exit(EXIT_FAILURE); } matrix = (double *) malloc(matrix_space); if (!matrix) { printf("ERROR: Could not allocate space for sparse matrix: "FSTR64U"\n", nent); exit(EXIT_FAILURE); } vector_space = 2*size2*sizeof(double); if (vector_space/sizeof(double) != 2*size2) { printf("ERROR: Cannot represent space for vectors: %ul\n", vector_space); exit(EXIT_FAILURE); } vector = (double *) malloc(vector_space); if (!vector) { printf("ERROR: Could not allocate space for vectors: %d\n", (int)(2*size2)); exit(EXIT_FAILURE); } result = vector + size2; index_space = nent*sizeof(s64Int); if (index_space/sizeof(s64Int) != nent) { printf("ERROR: Cannot represent space for column indices: %ul\n", index_space); exit(EXIT_FAILURE); } colIndex = (s64Int *) malloc(index_space); if (!colIndex) { printf("ERROR: Could not allocate space for column indices: "FSTR64U"\n", nent*sizeof(s64Int)); exit(EXIT_FAILURE); } #pragma omp parallel private (row, col, elm, first, last, iter) { #pragma omp master { nthread = omp_get_num_threads(); printf("OpenMP Sparse matrix-vector multiplication\n"); if (nthread != nthread_input) { num_error = 1; printf("ERROR: number of requested threads %d does not equal ", nthread_input); printf("number of spawned threads %d\n", nthread); } else { printf("Number of threads = %16d\n",nthread_input); printf("Matrix order = "FSTR64U"\n", size2); printf("Stencil diameter = %16d\n", 2*radius+1); printf("Sparsity = %16.10lf\n", sparsity); printf("Number of iterations = %16d\n", iterations); #ifdef SCRAMBLE printf("Using scrambled indexing\n"); #else printf("Using canonical indexing\n"); #endif } } bail_out(num_error); /* initialize the input and result vectors */ #pragma omp for for (row=0; row<size2; row++) result[row] = vector[row] = 0.0; /* fill matrix with nonzeroes corresponding to difference stencil. We use the scrambling for reordering the points in the grid. 
*/ #pragma omp for private (i,j,r) for (row=0; row<size2; row++) { j = row/size; i=row%size; elm = row*stencil_size; colIndex[elm] = REVERSE(LIN(i,j),lsize2); for (r=1; r<=radius; r++, elm+=4) { colIndex[elm+1] = REVERSE(LIN((i+r)%size,j),lsize2); colIndex[elm+2] = REVERSE(LIN((i-r+size)%size,j),lsize2); colIndex[elm+3] = REVERSE(LIN(i,(j+r)%size),lsize2); colIndex[elm+4] = REVERSE(LIN(i,(j-r+size)%size),lsize2); } /* sort colIndex to make sure the compressed row accesses vector elements in increasing order */ qsort(&(colIndex[row*stencil_size]), stencil_size, sizeof(s64Int), compare); for (elm=row*stencil_size; elm<(row+1)*stencil_size; elm++) matrix[elm] = 1.0/(double)(colIndex[elm]+1); } for (iter=0; iter<=iterations; iter++) { /* start timer after a warmup iteration */ if (iter == 1) { #pragma omp barrier #pragma omp master { sparse_time = wtime(); } } /* fill vector */ #pragma omp for for (row=0; row<size2; row++) vector[row] += (double) (row+1); /* do the actual matrix-vector multiplication */ #pragma omp for for (row=0; row<size2; row++) { first = stencil_size*row; last = first+stencil_size-1; #pragma simd reduction(+:temp) for (temp=0.0,col=first; col<=last; col++) { temp += matrix[col]*vector[colIndex[col]]; } result[row] += temp; } } /* end of iterations */ #pragma omp barrier #pragma omp master { sparse_time = wtime() - sparse_time; } } /* end of parallel region */ /* verification test */ reference_sum = 0.5 * (double) nent * (double) (iterations+1) * (double) (iterations +2); vector_sum = 0.0; for (row=0; row<size2; row++) vector_sum += result[row]; if (ABS(vector_sum-reference_sum) > epsilon) { printf("ERROR: Vector sum = %lf, Reference vector sum = %lf\n", vector_sum, reference_sum); exit(EXIT_FAILURE); } else { printf("Solution validates\n"); #ifdef VERBOSE printf("Reference sum = %lf, vector sum = %lf\n", reference_sum, vector_sum); #endif } avgtime = sparse_time/iterations; printf("Rate (MFlops/s): %lf Avg time (s): %lf\n", 1.0E-06 * (2.0*nent)/avgtime, avgtime); exit(EXIT_SUCCESS); } /* Code below reverses bits in unsigned integer stored in a 64-bit word. Bit reversal is with respect to the largest integer that is going to be processed for the particular run of the code, to make sure the reversal constitutes a true permutation. Hence, the final result needs to be shifted to the right. Example: if largest integer being processed is 0x000000ff = 255 = 0000...0011111111 (binary), then the unshifted reversal of 0x00000006 = 6 = 0000...0000000110 (binary) would be 011000000...0000 = 3*2^61, which is outside the range of the original sequence 0-255. Setting shift_in_bits to 2log(256) = 8, the final result is shifted to the right by 64-8=56 bits, so we get 000...0001100000 (binary) = 96, which is within the proper range */ u64Int reverse(register u64Int x, int shift_in_bits){ x = ((x >> 1) & 0x5555555555555555) | ((x << 1) & 0xaaaaaaaaaaaaaaaa); x = ((x >> 2) & 0x3333333333333333) | ((x << 2) & 0xcccccccccccccccc); x = ((x >> 4) & 0x0f0f0f0f0f0f0f0f) | ((x << 4) & 0xf0f0f0f0f0f0f0f0); x = ((x >> 8) & 0x00ff00ff00ff00ff) | ((x << 8) & 0xff00ff00ff00ff00); x = ((x >> 16) & 0x0000ffff0000ffff) | ((x << 16) & 0xffff0000ffff0000); x = ((x >> 32) & 0x00000000ffffffff) | ((x << 32) & 0xffffffff00000000); return (x>>((sizeof(u64Int)*BITS_IN_BYTE-shift_in_bits))); } int compare(const void *el1, const void *el2) { s64Int v1 = *(s64Int *)el1; s64Int v2 = *(s64Int *)el2; return (v1<v2) ? -1 : (v1>v2) ? 1 : 0; }
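The closing comment works the example 6 -> 96 for an 8-bit index range. A tiny C check of that arithmetic with a plain bit loop instead of the masked-swap version (reverse_bits is an illustrative name, not part of the kernel):

#include <stdint.h>
#include <stdio.h>

/* reverse the low `bits` bits of x, i.e. the shifted reversal described above */
static uint64_t reverse_bits(uint64_t x, int bits) {
  uint64_t r = 0;
  for (int i = 0; i < bits; i++) {
    r = (r << 1) | (x & 1);
    x >>= 1;
  }
  return r;
}

int main(void) {
  /* 6 = 00000110b; reversed within an 8-bit field: 01100000b = 96 */
  printf("%llu\n", (unsigned long long) reverse_bits(6, 8));
  return 0;
}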
AdPointDataReaders.h
/* Ralf Kaehler 14 December 2016 Copyright (c) 2016, The Board of Trustees of the Leland Stanford Junior University, through SLAC National Accelerator Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. (3) Neither the name of the Leland Stanford Junior University, SLAC National Accelerator Laboratory, U.S. Dept. of Energy nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER, THE UNITED STATES GOVERNMENT, OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades to the features, functionality or performance of the source code ("Enhancements") to anyone; however, if you choose to make your Enhancements available either publicly, or directly to SLAC National Accelerator Laboratory, without imposing a separate written license agreement for such Enhancements, then you hereby grant the following license: a non-exclusive, royalty-free perpetual license to install, use, modify, prepare derivative works, incorporate into other computer software, distribute, and sublicense such Enhancements or derivative works thereof, in binary and source code form. 
*/ #ifndef _AD_POINT_DATA_READERS_ #define _AD_POINT_DATA_READERS_ #include <cfloat> #include <string> #include "AdMeasureMPIWallClockTime.h" #include "AdUtils.h" #include "AdAssert.h" #include "AdLagrangianRedistribution.h" namespace AdaptiveMassDeposit { static inline AABBox get_global_bbox_mpi( const bool use_local_bbox, const AABBox& local_bbox ) { float loc_min[3] = {local_bbox.min[0],local_bbox.min[1],local_bbox.min[2]}; float loc_max[3] = {local_bbox.max[0],local_bbox.max[1],local_bbox.max[2]}; float global_bbox_min[3]; float global_bbox_max[3]; if ( use_local_bbox==false ) { loc_min[0] = loc_min[1] = loc_min[2] = FLT_MAX; loc_max[0] = loc_max[1] = loc_max[2] = -FLT_MAX; /* -FLT_MAX, not FLT_MIN (the smallest positive float), so the MAX reduction also works for negative coordinates */ } CHECK_MPI_ERROR( MPI_Allreduce(loc_min, global_bbox_min, 3, MPI_FLOAT, MPI_MIN, MPI_COMM_WORLD) ); CHECK_MPI_ERROR( MPI_Allreduce(loc_max, global_bbox_max, 3, MPI_FLOAT, MPI_MAX, MPI_COMM_WORLD) ); AABBox res; res.min = PosVec(global_bbox_min[0],global_bbox_min[1],global_bbox_min[2]); res.max = PosVec(global_bbox_max[0],global_bbox_max[1],global_bbox_max[2]); return res; } static AABBox get_local_bbox( const std::vector<std::shared_ptr<particles_with_ids_chunk> >& particles_by_proc ) { AABBox box; bool has_local_particles = false; // first initialize bbox for ( size_t p=0; p<particles_by_proc.size(); ++p ) { if ( particles_by_proc[p]->get_num()>0 ) { pos_t pos[3]; particles_by_proc[p]->get_position( 0, pos ); box.min = PosVec( pos[0],pos[1],pos[2] ); box.max = PosVec( pos[0],pos[1],pos[2] ); has_local_particles = true; break; } } if ( has_local_particles==false ) { throw AdRuntimeException("WARNING: get_local_bbox(): have no local particles.",false); } // to-do: try to optimize this using OpenMP for ( size_t p=0; p<particles_by_proc.size(); ++p ) { const Particles::pos_t_vec pos_x_ptr = *particles_by_proc[p]->get_positions( 0 ); const Particles::pos_t_vec pos_y_ptr = *particles_by_proc[p]->get_positions( 1 ); const Particles::pos_t_vec pos_z_ptr = *particles_by_proc[p]->get_positions( 2 ); for ( size_t i=0; i<pos_x_ptr.size(); ++i ) { box.min = PosVec( std::min(pos_x_ptr[i],box.min[0]), std::min(pos_y_ptr[i],box.min[1]), std::min(pos_z_ptr[i],box.min[2]) ); box.max = PosVec( std::max(pos_x_ptr[i],box.max[0]), std::max(pos_y_ptr[i],box.max[1]), std::max(pos_z_ptr[i],box.max[2]) ); } } return box; } static AABBox get_global_bbox_mpi( const std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_by_proc ) { try { const AABBox local_bbox = get_local_bbox( particles_by_proc ); return get_global_bbox_mpi( true, local_bbox ); } catch( std::exception& ex ) { AABBox local_bbox; std::cout << "WARNING: get_global_bbox_mpi(): Local process does not own any particles yet. Computing global bbox without it." 
<< std::endl; return get_global_bbox_mpi( false, local_bbox ); } } template <class ID_MAPPING_ORDER> static void apply_boundary_corrections(const AABBox& bbox, const AvVec3i& grid_dims, const ID_MAPPING_ORDER& id_functor, particles_with_ids_chunk& particles ) { const pos_t box_ext[3] = { bbox.get_extension(0), bbox.get_extension(1), bbox.get_extension(2) }; const pos_t box_ext_h1[3] = { bbox.min[0]+box_ext[0]/pos_t(4.0), bbox.min[1]+box_ext[1]/pos_t(4.0), bbox.min[2]+box_ext[2]/pos_t(4.0) }; const pos_t box_ext_h2[3] = { bbox.min[0]+pos_t(3./4.)*box_ext[0], bbox.min[1]+pos_t(3./4.)*box_ext[1], bbox.min[2]+pos_t(3./4.)*box_ext[2] }; const pos_t td_h[3] = { pos_t(grid_dims[0])/pos_t(2.0), pos_t(grid_dims[1])/pos_t(2.0), pos_t(grid_dims[2])/pos_t(2.0) }; Particles::pos_t_vec& px = *particles.get_positions(0); Particles::pos_t_vec& py = *particles.get_positions(1); Particles::pos_t_vec& pz = *particles.get_positions(2); Particles::ids_t_vec& ids = *particles.get_ids(); // to-do: should/could parallelize this using OpenMP for ( size_t p=0; p<particles.get_num(); ++p ) { // use id_functor to translate linear index into 3D index const AvVec3i idx = id_functor.map_linear_to_3D_idx( ids[p], grid_dims ); pos_t pos[3] = { px[p], py[p], pz[p] }; //const AvVislib::AvVec3D<T> pos(positions[idx],positions[idx+1],positions[idx+2]); bool need_update = false; for ( int i=0; i<3; ++i ) { if ( idx[i]>td_h[i] && pos[i]<box_ext_h1[i] ) { pos[i] += box_ext[i]; need_update = true; } else if ( idx[i]<td_h[i] && pos[i]>box_ext_h2[i]) { pos[i] -= box_ext[i]; need_update = true; } } if ( need_update ) { px[p] = pos[0]; py[p] = pos[1]; pz[p] = pos[2]; } } // end loop over p } class AdPointDataReader { public: inline AvVec3i get_rank_dims() const { return procs_dims_; } inline AvVec3i get_global_particle_dims() const { return global_particle_dims_; } inline std::vector< std::shared_ptr<particles_with_ids_chunk> >& get_particles_per_rank( ) { return particles_per_proc_; } inline size_t load_data(const std::string& filename, const int my_rank, const int num_procs, const int stride ) { if ( filename.empty() ) { throw AdRuntimeException( "ERROR: AdPointDataReader::load_data(): missing filename." ); } if (my_rank>=num_procs || my_rank<0 || num_procs<1 ) { throw AdRuntimeException( "ERROR: AdPointDataReader::load_data(): invalid MPI rank parameters." ); } if ( stride<1 ) { throw AdRuntimeException( "ERROR: AdPointDataReader::load_data(): invalid 'stride' parameters." ); } // allocate bins to store the data particles_per_proc_.resize( num_procs ); for ( size_t i=0; i<particles_per_proc_.size(); ++i ) { particles_per_proc_[i] = std::shared_ptr<particles_with_ids_chunk>(new particles_with_ids_chunk); } const size_t num_particles = load_data_(filename, my_rank, num_procs, stride, procs_dims_, global_particle_dims_, particles_per_proc_ ); if ( num_particles == 0 ) { std::cerr << "WARNING: AdPointDataReaders()::load_data(): call returned 0 particles on MPI rank == " << my_rank << std::endl; } AD_ASSERT( checkInvariant_( my_rank, num_procs ), "ERROR: AdPointDataReaders(): checkInvariants_() failed." 
); return num_particles; } protected: virtual size_t load_data_(const std::string& filename, const int my_rank, const int num_procs, const int stride, AvVec3i& procs_dims, AvVec3i& global_particle_dims, std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_per_proc ) = 0; private: bool checkInvariant_( const int my_rank, const int num_procs ) const { assert( procs_dims_[0]*procs_dims_[1]*procs_dims_[2] == num_procs ); size_t particle_count = 0; for ( int p=0; p<particles_per_proc_.size(); ++p ) { particle_count += particles_per_proc_[p]->get_num(); for ( int i=0; i<particles_per_proc_[p]->get_num(); ++i ) { // we are assuming column-major array order in the remainder of the code, so readers have to remap if necessary const size_t lin_id = particles_per_proc_[p]->get_id(i); const AvVec3i lid = AdaptiveMassDeposit::ColumnMajorOrder::map_linear_to_3D_idx( lin_id, global_particle_dims_ ); AvVec3i tmp_block_offset; AvVec3i tmp_block_dims; // check if point is really assigned to the correct process AdaptiveMassDeposit::LagrangianRedistribution::get_patch_info<ColumnMajorOrder>(num_procs, p, procs_dims_, global_particle_dims_, tmp_block_offset, tmp_block_dims ); AD_ASSERT_C( lid[0]>=tmp_block_offset[0] && lid[0]<(tmp_block_offset[0]+tmp_block_dims[0]), { AvVec3i(lid[0],lid[1],lid[2]).print(); tmp_block_offset.print(); tmp_block_dims.print();} ); assert( lid[1]>=tmp_block_offset[1] && lid[1]<(tmp_block_offset[1]+tmp_block_dims[1]) ); assert( lid[2]>=tmp_block_offset[2] && lid[2]<(tmp_block_offset[2]+tmp_block_dims[2]) ); } } return true; } private: AvVec3i procs_dims_; AvVec3i global_particle_dims_; std::vector< std::shared_ptr<particles_with_ids_chunk> > particles_per_proc_; }; class AdPointDataReadersFactory { public: virtual std::shared_ptr<AdPointDataReader> create_reader() = 0; }; template< class ReaderClass > class AdPointDataReadersCreator : public AdPointDataReadersFactory { public: virtual std::shared_ptr<AdPointDataReader> create_reader() { return std::shared_ptr<ReaderClass> ( new ReaderClass() ); } }; class AdDarkSkyDataReader : public AdPointDataReader { protected: static inline void morton_2_grid( const int64_t morton, int64_t grid_id[3]) { static const int64_t mask = (1LL<<48)-1; int64_t key = morton & mask; int level = 0; grid_id[0] = 0; grid_id[1] = 0; grid_id[2] = 0; while ( key>0 ) { grid_id[2] += (key & 1) << level; key = key >> 1; grid_id[1] += (key & 1) << level; key = key >> 1; grid_id[0] += (key & 1) << level; key = key >> 1; level += 1; } std::swap( grid_id[0], grid_id[2] ); } static inline uint64_t grid_2_morton( const unsigned int x, const unsigned int y, const unsigned int z) { uint64_t morton = 0; static const uint64_t mb = (sizeof(uint64_t)*CHAR_BIT)/3; for ( uint64_t i=0; i<mb; ++i ) { const uint64_t i2 = 2*i; const uint64_t is = uint64_t(1) << i; morton |= ( (x&is)<<i2) | ( (y&is)<<(i2+1)) | ( (z&is)<<(i2+2)) ; } return morton; } static size_t get_header_size( const std::string& filename ) { std::filebuf fb; if ( !fb.open (filename.c_str(),std::ios::in|std::ios_base::binary) ) { throw AdRuntimeException( "ERROR: get_header_size(): failed to open file: " + filename ); } std::istream infile(&fb); char buffer[1024]; size_t security_c = 0; int sha1_chunks = -1; bool found_eoh = false; while ( infile.good() && security_c++<100000 ) { infile.getline (buffer, 1024 ); std::string line(buffer); if ( line.compare(0, 17, "int sha1_chunks =")==0 ) { std::string tmp = line.substr(line.find("=")+1, line.find(";")-line.find("=")-1); sha1_chunks = atoi(tmp.c_str()); } 
if ( line.compare(0, 9, "# SDF-EOH") == 0 ) { found_eoh = true; break; } } /* struct { unsigned int sha1_len; unsigned char sha1[20]; }[16]; */ // header size == current file pos + offset for sha1 const size_t header_size = size_t(infile.tellg()) + sha1_chunks*(sizeof(int)+20); fb.close(); if ( found_eoh==false || sha1_chunks<0 || security_c++>=100000 ) { throw AdRuntimeException("Failed to parse header of file: " + filename ); } return header_size; } virtual size_t load_data_(const std::string& filename, const int my_rank, const int num_procs, const int stride, AvVec3i& procs_dims, AvVec3i& global_particle_dims, std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_per_rank ) { #if 1 // first get header size of SDF file unsigned long header_size = 0; if ( my_rank== 0 ) { header_size = get_header_size(filename); } CHECK_MPI_ERROR( MPI_Bcast( &header_size, 1, MPI_LONG_INT, 0, MPI_COMM_WORLD) ); if ( header_size==0 ) { throw AdRuntimeException( "ERROR: AdDarkSkyDataReader::load_data_(): invalid header size" ); } MPI_File file_handle; CHECK_MPI_ERROR( MPI_File_open( MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handle) ); //size_t file_size = get_file_size(filename); MPI_Offset file_size = 0; CHECK_MPI_ERROR( MPI_File_get_size( file_handle, &file_size ) ); /* struct darksky_particle { float x, y, z; float vx, vy, vz; int64_t id; } ds_part_buffer; */ static const size_t bytes_per_particle = 6*sizeof(float)+sizeof(int64_t); static const int ids_offset = 6*sizeof(float); // hack: assuming that number of particle structs is divisible by number of processors const uint64_t total_num_particles = (file_size-header_size)/(bytes_per_particle); if ( (file_size-header_size)%bytes_per_particle!=0 ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): inconsistent file size "); } const size_t linear_particle_dims = rint( cbrt(total_num_particles) ); if ( linear_particle_dims*linear_particle_dims*linear_particle_dims!= total_num_particles ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): lagrangian grid not cubical "); } else { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): linear lagrangian grid dimension: " << linear_particle_dims << std::endl; } const size_t particles_per_proc = ceil( float(total_num_particles)/num_procs); const unsigned int src_dims[3] = { static_cast<unsigned int>(linear_particle_dims/stride), static_cast<unsigned int>(linear_particle_dims/stride), static_cast<unsigned int>(linear_particle_dims/stride) }; unsigned int dst_dims[3]; ProcessLayout::find_best_match( num_procs, src_dims, dst_dims); procs_dims = AvVec3i( dst_dims[0],dst_dims[1],dst_dims[2] ); const AvVec3i process_3D_idx = ColumnMajorOrder::map_linear_to_3D_idx( uint64_t(my_rank), procs_dims ); int effective_rank = -1; const bool power_of_two = (procs_dims[0]>0) && ( !( procs_dims[0] & (procs_dims[0]-1)) ); if ( power_of_two && procs_dims[1] == procs_dims[0] && procs_dims[2]==procs_dims[0] ) { for ( int64_t rank=0; rank<num_procs; ++rank ) { int64_t lid[3] = {0,0,0}; morton_2_grid(int64_t( rank ),lid); AD_ASSERT( grid_2_morton(lid[0],lid[1],lid[2])==uint64_t(rank), "" ); if ( AvVec3i(lid[0],lid[1],lid[2])==process_3D_idx ) { effective_rank = rank; break; } } } else { effective_rank = my_rank; } if ( effective_rank<0 || effective_rank>=num_procs ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): inconsistent effective process id. 
"); } global_particle_dims = AvVec3i(linear_particle_dims,linear_particle_dims,linear_particle_dims); if ( global_particle_dims[0]%stride!=0 || global_particle_dims[1]%stride!=0 || global_particle_dims[2]%stride!=0 ) { throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): invalid stride for data dimensions "); } global_particle_dims[0] /= stride; global_particle_dims[1] /= stride; global_particle_dims[2] /= stride; AD_VERBOSE( 1, { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): (MPI_rank==" << my_rank << "): Starting to load " << particles_per_proc << " of the total " << total_num_particles << " particles. " << std::endl; } ); // compute number of cells per processor (upper rows could contain fewer particles) const AvVec3i cells_per_proc((global_particle_dims[0]-1 + (procs_dims[0]-1))/procs_dims[0], (global_particle_dims[1]-1 + (procs_dims[1]-1))/procs_dims[1], (global_particle_dims[2]-1 + (procs_dims[2]-1))/procs_dims[2] ); const size_t start = effective_rank*particles_per_proc; //const size_t start = my_rank*particles_per_proc; // skip to start of our piece of the cake CHECK_MPI_ERROR( MPI_File_set_view( file_handle, header_size+start*bytes_per_particle, MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL ) ); static const size_t particles_per_pass = 50000000; static const size_t buffer_size = particles_per_pass*( bytes_per_particle ); std::vector<char> buffer(buffer_size,0); size_t c = start; size_t loc_c = 0; bool file_good = true; size_t num_effective_particles = 0; while ( file_good && loc_c<particles_per_proc && c<total_num_particles ) { const size_t new_size = std::min(buffer.size()/bytes_per_particle, std::min( particles_per_proc-loc_c,size_t(total_num_particles-c)) ); buffer.resize( new_size*bytes_per_particle ); if ( buffer.empty() ) { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): BREAK. 
" << std::endl; break; } MPI_Status status; if ( MPI_File_read( file_handle, &buffer[0], buffer.size(), MPI_CHAR, &status ) != MPI_SUCCESS ) { std::cout << "WARNING: AdDarkSkyDataReader::load_data_(): file_handle = " << file_handle << " buffer.size()=" << buffer.size() << std::endl; throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): Failed to read all particles for lagrangian input grid."); } AD_ASSERT( buffer.size()%bytes_per_particle==0, "ERROR: AdDarkSkyDataReader::load_data_(): invalid number of read bytes" ); const size_t num_read_particles = buffer.size()/bytes_per_particle; #pragma omp parallel for reduction(+:loc_c) reduction(+:c) //schedule(dynamic) for ( size_t i=0; i<num_read_particles; ++i ) { const size_t bytes_counter = i*bytes_per_particle; const float* float_ptr = reinterpret_cast<const float*>(&buffer.front()+bytes_counter); const float pos[3] = { float_ptr[0],float_ptr[1], float_ptr[2] }; const int64_t id = *(reinterpret_cast<int64_t*>( &buffer.front()+bytes_counter+ids_offset )); int64_t lid[3]; morton_2_grid(id, lid); AD_ASSERT( lid[0]>=0 || lid[1]>=0 || lid[2]>=0 , "ERROR: AdDarkSkyDataReader::load_data_(): invalid dark sky id" ); AD_ASSERT( lid[0]<200000 || lid[1]<200000 || lid[2]<200000 , "ERROR: AdDarkSkyDataReader::load_data_(): invalid dark sky id" ); const bool skip_particle = ( stride!=1 && (lid[0]%stride!=0 || lid[1]%stride!=0 || lid[2]%stride!=0) ); if ( !skip_particle ) { // compute effective particles ID lid[0] /= stride; lid[1] /= stride; lid[2] /= stride; particle_with_id new_part; //const pos_t pos[3] = { pos[0],pos[1],pos[2]); const ids_t lin_id = lid[0]+(global_particle_dims[0])*(lid[1]+lid[2]*(global_particle_dims[1])); int target_ranks[8]; const int num_results = LagrangianRedistribution::get_target_processes<ColumnMajorOrder>(AvVec3i(lid[0],lid[1],lid[2]), procs_dims, global_particle_dims , AvVec3i(cells_per_proc), target_ranks ); // to-do: get rid of critial section using private data and reduction .... #pragma omp critical { for ( int n=0; n<num_results; ++n ) { #ifndef NDEBUG { const AvVec3i idx_3D = ColumnMajorOrder::map_linear_to_3D_idx( lin_id, global_particle_dims ); assert( idx_3D==AvVec3i(lid[0],lid[1],lid[2]) ); AvVec3i tmp_block_offset; AvVec3i tmp_block_dims; // check if point is really assigned to the correct process AdaptiveMassDeposit::LagrangianRedistribution::get_patch_info<ColumnMajorOrder>(num_procs, target_ranks[n], procs_dims, global_particle_dims, tmp_block_offset, tmp_block_dims ); AD_ASSERT_C( lid[0]>=tmp_block_offset[0] && lid[0]<(tmp_block_offset[0]+tmp_block_dims[0]), { AvVec3i(lid[0],lid[1],lid[2]).print(); tmp_block_offset.print(); tmp_block_dims.print();} ); assert( lid[1]>=tmp_block_offset[1] && lid[1]<(tmp_block_offset[1]+tmp_block_dims[1]) ); assert( lid[2]>=tmp_block_offset[2] && lid[2]<(tmp_block_offset[2]+tmp_block_dims[2]) ); } #endif particles_per_rank[target_ranks[n]]->push_back( pos[0],pos[1],pos[2], lin_id ); } ++num_effective_particles; } } // end if (!skip_particle) // save using reduction pragma ++loc_c; ++c; } // end for i<num_read_particles AD_VERBOSE( 1, {std::cout << "INFO: AdDarkSkyDataReader::load_data_(): (MPI_rank==" << my_rank << "): Read " << 100.*loc_c/float(particles_per_proc) << "% of its particles." << std::endl;} ); } // end while CHECK_MPI_ERROR( MPI_File_close( &file_handle ) ); AD_VERBOSE( 1, { std::cout << "INFO: AdDarkSkyDataReader::load_data_(): (MPI_rank==" << my_rank << ") Finished reading " << loc_c << " particles." 
<< std::endl; } ); { long long int global_num_particles = 0; long long int local_num_particles = num_effective_particles; CHECK_MPI_ERROR( MPI_Allreduce(&local_num_particles, &global_num_particles, 1, MPI_LONG_LONG, MPI_SUM, MPI_COMM_WORLD) ); if( global_num_particles != size_t(global_particle_dims[0])*size_t(global_particle_dims[1])*size_t(global_particle_dims[2]) ) { std::cout << global_num_particles << std::endl; std::cout << size_t(global_particle_dims[0])*size_t(global_particle_dims[1])*size_t(global_particle_dims[2]) << std::endl; global_particle_dims.print(); throw AdRuntimeException("ERROR: AdDarkSkyDataReader::load_data_(): inconsistent global particle number"); } } return num_effective_particles; #endif } }; class AdSortedByIdsPointDataReader : public AdPointDataReader { protected: virtual size_t load_data_(const std::string& filename, const int my_rank, const int num_procs, const int stride, AvVec3i& procs_dims, AvVec3i& global_particle_dims, std::vector< std::shared_ptr<particles_with_ids_chunk> >& particles_per_proc) { assert( particles_per_proc.size() == num_procs ); if ( stride>1 ) { std::cout << "WARNING: AdSortedByIdsPointDataReader::load__data(): 'stride' parameter currently not supported - will ignore it ... " << std::endl; } MPI_File file_handle = 0; if ( MPI_File_open( MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file_handle )!=MPI_SUCCESS ) { throw AdRuntimeException("ERROR: AdInitialization_IDS::load_particle_data(): failed to open file: " + filename ); } MPI_Offset file_size = 0; CHECK_MPI_ERROR( MPI_File_get_size( file_handle, &file_size ) ); static const size_t bytes_per_particle = 3*sizeof(float); if ( file_size%bytes_per_particle!=0 ) { throw AdRuntimeException("ERROR: AdInitialization_IDS::load_particle_data(): inconsistent file size "); } const uint64_t global_num_particles = file_size/bytes_per_particle; global_particle_dims = AvVec3i( rint(cbrt(global_num_particles)) ); if ( size_t(global_particle_dims[0])*size_t(global_particle_dims[1])*size_t(global_particle_dims[2])!= global_num_particles ) { throw AdRuntimeException("ERROR: AdInitialization_IDS::load_particle_data(): lagrangian grid not cubical "); } { const unsigned int src_dims[3] = { static_cast<unsigned int>(global_particle_dims[0]), static_cast<unsigned int>(global_particle_dims[1]), static_cast<unsigned int>(global_particle_dims[2]) }; unsigned int dst_dims[3]; ProcessLayout::find_best_match( num_procs, src_dims, dst_dims); procs_dims = AvVec3i( dst_dims[0], dst_dims[1], dst_dims[2] ); } AvVec3i block_offset; AvVec3i particle_block_dims; LagrangianRedistribution::get_patch_info< ColumnMajorOrder >( num_procs, my_rank, procs_dims, global_particle_dims, block_offset, particle_block_dims ); CHECK_MPI_ERROR( MPI_File_set_view( file_handle, 0, MPI_FLOAT, MPI_FLOAT, "native", MPI_INFO_NULL ) ); std::vector<float> buffer( 3*particle_block_dims[0] ); size_t c = 0; for ( size_t k=0; k<particle_block_dims[2]; ++k ) { for ( size_t j=0; j<particle_block_dims[1]; ++j ) { const size_t linear_offset = block_offset[0] + global_particle_dims[0]*( (block_offset[1]+j) + global_particle_dims[1]*(block_offset[2]+k) ) ; // are we still inside the grid of active particles ? 
AD_ASSERT( linear_offset<global_num_particles, "" ); AD_ASSERT_C( (linear_offset+particle_block_dims[0])<=global_num_particles, { printf("ERROR: %lu, %i, %llu\n",linear_offset,particle_block_dims[0], (long long unsigned int)global_num_particles);} ); AD_ASSERT( bytes_per_particle*(linear_offset+particle_block_dims[0])<=size_t(file_size), "bug"); MPI_Status status; CHECK_MPI_ERROR( MPI_File_read_at( file_handle, 3*linear_offset, &buffer[0], buffer.size(), MPI_FLOAT, &status ) ); { int count = 0; CHECK_MPI_ERROR( MPI_Get_count( &status, MPI_FLOAT, &count )); AD_ASSERT_C( size_t(count)==buffer.size() && size_t(count)==3*particle_block_dims[0], { printf("ERROR: file I/O: invalid number of read items(%i) vs particle_lock_dims[0](%i). index=[%lu,%lu], offset==%lu\n",count,particle_block_dims[0],j,k, linear_offset);} ); } for ( size_t i=0; i<particle_block_dims[0]; ++i ) { const size_t idx = 3*i; AD_ASSERT( (idx+2) < buffer.size(), "bug" ); const pos_t pos[3] = { buffer[idx], buffer[idx+1], buffer[idx+2] }; const ids_t id = linear_offset+i; assert( (block_offset[0]+i) < global_particle_dims[0] ); assert( (block_offset[1]+j) < global_particle_dims[1] ); assert( (block_offset[2]+k) < global_particle_dims[2] ); // per construction each particle read by this rank is for this rank ... particles_per_proc[my_rank]->push_back( pos[0],pos[1],pos[2],id ); ++c; } } } AD_ASSERT_C( particles_per_proc[my_rank]->get_num() == (particle_block_dims[0]*particle_block_dims[1]*particle_block_dims[2]), { printf("%lu, %i, %i, %i\n", particles_per_proc[my_rank]->get_num(), particle_block_dims[0], particle_block_dims[1], particle_block_dims[2]);}); AD_VERBOSE(0, { std::cout << "INFO: AdInitialization_IDS::load_particle_data(): Process " << my_rank << " read " << c << " particles. Closing file." << std::endl; } ); CHECK_MPI_ERROR( MPI_File_close( &file_handle ) ); return c; } }; static std::shared_ptr<AdPointDataReader> get_reader( const std::string& format ) { if ( format.find("SORTED_BY_ID")!=std::string::npos ) { return std::shared_ptr<AdPointDataReader>( new AdSortedByIdsPointDataReader() ); } else if ( format.find("DARK_SKY")!=std::string::npos ) { return std::shared_ptr<AdPointDataReader>( new AdDarkSkyDataReader() ); } else { throw AdRuntimeException("ERROR: id_type " + format + " not supported."); } } } #endif
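AdDarkSkyDataReader above recovers 3D Lagrangian grid coordinates from Morton (Z-order) particle ids. A standalone C sketch of the interleave/deinterleave pair with the same bit layout as grid_2_morton (bit i of x lands at position 3i, y at 3i+1, z at 3i+2); the round-trip test values are illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* interleave three 21-bit coordinates into one 63-bit Morton key */
static uint64_t morton_encode(unsigned x, unsigned y, unsigned z) {
    uint64_t m = 0;
    for (int i = 0; i < 21; i++) {
        uint64_t bit = (uint64_t)1 << i;
        m |= ((uint64_t)(x & bit) << (2 * i)) |
             ((uint64_t)(y & bit) << (2 * i + 1)) |
             ((uint64_t)(z & bit) << (2 * i + 2));
    }
    return m;
}

/* deinterleave the key back into grid coordinates */
static void morton_decode(uint64_t m, unsigned g[3]) {
    g[0] = g[1] = g[2] = 0;
    for (int i = 0; m > 0; i++, m >>= 3) {
        g[0] |= (unsigned)(m & 1) << i;
        g[1] |= (unsigned)((m >> 1) & 1) << i;
        g[2] |= (unsigned)((m >> 2) & 1) << i;
    }
}

int main(void) {
    unsigned g[3];
    morton_decode(morton_encode(5, 9, 130), g);
    printf("%u %u %u\n", g[0], g[1], g[2]); /* 5 9 130 */
    assert(g[0] == 5 && g[1] == 9 && g[2] == 130);
    return 0;
}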
omp_simd_aligned2.c
//Variable examples of using simd directives void foo (int n, double *a, double* b) { for (int i=0; i<n; i++) a[i]=b[i]; } void foo2 (int n, double *a, double* b) { for (int i=0; i<n; i++) a[i]=b[i]; } void foo3 (int n, double *a, double* b) { int j=0; for (int i=0; i<n; i++,j++) { a[i]=b[i]+j; } } void foo32 (int n, double *a, double* b) { int j=0, k=0; for (int i=0; i<n; i++,j++,k++) { a[i]=b[i]+j+k; } } void foo33 (int n, double *a, double* b) { int j=0, k=0; for (int i=0; i<n; i++,j++,k++) { a[i]=b[i]+j+k; } } void fooAligned (int n, double *a, double* b) { int j=0, k=0; for (int i=0; i<n; i++,j++,k++) { a[i]=b[i]+j+k; } } void fooAligned2 (int n, double *a, double* b) { int j=0, k=0; #pragma omp simd aligned(j,k:1) for (int i=0; i<n; i++,j++,k++) { a[i]=b[i]+j+k; } } double work( double *a, double *b, int n ) { int i; double tmp, sum; sum = 0.0; for (i = 0; i < n; i++) { tmp = a[i] + b[i]; sum += tmp; } return sum; } #define N 45 int a[N], b[N], c[N]; void foo4(int i, double* P) { int j; for (i = 0; i < 999; ++i) { j = P[i]; } } void work2( double **a, double **b, double **c, int n ) { int i, j; double tmp; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { tmp = a[i][j] + b[i][j]; c[i][j] = tmp; } } } void work3( double **a, double **b, double **c, int n ) { int i, j; double tmp; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { tmp = a[i][j] + b[i][j]; c[i][j] = tmp; } } } // declare simd can show up several times! float bar(int * p) { *p = *p +10; return *p; } // declare simd can show up several times! float bar2(int * p) { *p = *p +10; return *p; }
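fooAligned2 above applies the aligned clause to scalars; the more common use asserts that heap arrays sit on a known boundary so the vectorizer can emit aligned loads and stores. A minimal sketch (the 64-byte figure is an assumption matching a typical cache-line/AVX-512 boundary):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  int n = 1024;
  /* C11 aligned_alloc provides the 64-byte alignment the pragma asserts */
  double* a = aligned_alloc (64, n * sizeof(double));
  double* b = aligned_alloc (64, n * sizeof(double));
  for (int i=0; i<n; i++) b[i] = i;

#pragma omp simd aligned(a,b:64)
  for (int i=0; i<n; i++)
    a[i] = 2.0 * b[i];

  printf ("%g\n", a[7]); /* 14 */
  free (a);
  free (b);
  return 0;
}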
task-taskgroup-unrelated.c
/* Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze (joachim.protze@tu-dresden.de), Jonas Hahnfeld (hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin Schulz. LLNL-CODE-773957 All rights reserved. This file is part of Archer. For details, see https://pruners.github.io/archer. Please also read https://github.com/PRUNERS/archer/blob/master/LICENSE. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // RUN: %libarcher-compile-and-run-race | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> int main(int argc, char* argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) #pragma omp master { #pragma omp task shared(var) { var++; // Give master thread time to execute the task in the taskgroup. sleep(1); } #pragma omp taskgroup { #pragma omp task if(0) { // Dummy task. } // Give other threads time to steal the tasks. sleep(1); } var++; } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK: Write of size 4 // CHECK: #0 .omp_outlined. // CHECK: Previous write of size 4 // CHECK: #0 .omp_outlined. // CHECK: DONE
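The race exists because taskgroup only waits for tasks created inside its own region, so the first task can still be writing var when the final increment runs. For contrast, a sketch of the synchronization that would remove the race, a taskwait covering the earlier sibling task (an illustration only; the test above deliberately keeps the race):

#include <omp.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
  int var = 0;

#pragma omp parallel num_threads(2) shared(var)
#pragma omp master
  {
#pragma omp task shared(var)
    { var++; }

    // waits for ALL child tasks of the current task, including the one above
#pragma omp taskwait

    var++; // now ordered after the task's increment
  }

  printf("var = %d\n", var); // 2
  return 0;
}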
inv_power.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include "nb/memory_bot.h" #include "nb/solver_bot.h" #include "../sparse_struct.h" #define POW2(a) ((a)*(a)) int nb_sparse_eigen_ipower(const nb_sparse_t *const A, nb_solver_t solver, int h, double mu, double **_eigenvecs,/* Out */ double *_eigenvals, /* Out */ int* it, /* Out */ double tolerance, uint32_t omp_parallel_threads) { /* The program must receive all the pointers allocated, where * > A is a nb_sparse_t matrix * > _eigenvecs is an array of size h to store h eigenvectors. * > _eigenvals is an array of size h to store the h * eigenvalues approximated. * > h is the number of eigenvalues to be computed. * > '*it' will store (after computation) the iterations needed * to compute each eigenvalue (is a return value). */ /* Declare structures and variables to be used */ uint32_t i, j, c, d; /* Iterative variables */ double pnorm, rnorm2; /* Allocate memory for structures */ double* p = nb_allocate_zero_mem(A->N * sizeof(double)); double* z = nb_allocate_zero_mem(A->N * sizeof(double)); /* Set M in A (copy ptr to modify const A, it will be restored) */ nb_sparse_t* A_ptr_copy = (nb_sparse_t*)A; if (mu != 0.0) for (c = 0; c < A->N; c++) nb_sparse_add(A_ptr_copy, c, c, -mu); /* M = A - mu*I */ /* LU Decomposition in case of LU Solver */ nb_sparse_t *L = NULL; nb_sparse_t *U = NULL; if (NB_SOLVER_CHK == solver) { nb_sparse_alloc_LU(A, &L, &U); nb_sparse_decompose_Cholesky(A, L, U, omp_parallel_threads); } else if (NB_SOLVER_LUD == solver) { nb_sparse_alloc_LU(A, &L, &U); nb_sparse_decompose_LU(A, L, U, omp_parallel_threads); } /* Deflation inverse power method */ for (i = 0; i < h; i++) { it[i] = 0; rnorm2 = 1; /* Initialize q0 such that ||qk||=1 */ _eigenvecs[i][0] = 1; for(j=1; j< A->N; j++) _eigenvecs[i][j] = 0; /* Start loop */ double rnorm2_diff = 1; while (rnorm2 > POW2(tolerance) && rnorm2_diff > POW2(tolerance)) { /* Step 1 */ if (NB_SOLVER_CHK == solver || NB_SOLVER_LUD == solver) nb_sparse_solve_LU(L, U, _eigenvecs[i], p); else if (NB_SOLVER_CGJ == solver) nb_sparse_solve_CG_precond_Jacobi(A,_eigenvecs[i], p, nb_sparse_get_size(A)*10, 1e-3, NULL, NULL, omp_parallel_threads); else nb_sparse_solve_conjugate_gradient(A,_eigenvecs[i], p, nb_sparse_get_size(A)*10, 1e-3, NULL, NULL, omp_parallel_threads); /* Step 2 */ pnorm = nb_vector_get_norm(p, A->N); for(c=0; c < A->N; c++) _eigenvecs[i][c] = p[c]/pnorm; /* Step 3 */ for (j = 0; j < i; j++){ double alpha = 0; #pragma omp parallel for reduction(+:alpha) num_threads(omp_parallel_threads) schedule(guided) private(c) for(c=0; c < A->N; c++) alpha += _eigenvecs[i][c]*_eigenvecs[j][c]; #pragma omp parallel for num_threads(omp_parallel_threads) private(c) for(c=0; c < A->N; c++) _eigenvecs[i][c] -= alpha*_eigenvecs[j][c]; } /* Step 4 */ /* Parallelize the operation zk = A*qk */ #pragma omp parallel for num_threads(omp_parallel_threads) schedule(guided) private(d) private(c) for(c=0; c < A->N; c++){ z[c] = 0; for(d=0; d < A->rows_size[c]; d++){ double aii = A->rows_values[c][d]; if(A->rows_index[c][d] == c) aii += mu; /* add mu back on the true diagonal entry (column == row), not at position d == c */ z[c] += aii *(_eigenvecs[i][A->rows_index[c][d]]); } } /* Step 5 */ double sigma = 0; for(c=0; c < A->N; c++) sigma += _eigenvecs[i][c]*z[c]; _eigenvals[i] = sigma; /* Step 6 and 7 */ rnorm2_diff = rnorm2; rnorm2 = 0; for(c=0; c < A->N; c++) rnorm2 += POW2(z[c]-sigma*_eigenvecs[i][c]); rnorm2_diff = fabs(rnorm2_diff-rnorm2); it[i]++; } } /* Restore A */ if (mu != 0.0) for (c = 0; c < A->N; c++) nb_sparse_add(A_ptr_copy, c, c, mu); /* A = M + mu*I */ 
    /* Destroy LU decomposition */
    if (NB_SOLVER_CHK == solver || NB_SOLVER_LUD == solver) {
        nb_sparse_destroy(U);
        nb_sparse_destroy(L);
    }

    /* Free memory */
    nb_free_mem(p);
    nb_free_mem(z);

    return 0;
}
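A minimal usage sketch, not part of the original nbots sources: it assumes a pre-assembled nb_sparse_t matrix A of dimension N (matrix assembly is library-specific and omitted here), and otherwise uses only symbols that already appear in the file above. With shift mu = 0 the routine converges toward the eigenvalues of A of smallest magnitude; with mu != 0, toward those nearest mu.

/* Hypothetical driver; A and N are assumed to come from elsewhere. */
static void example_eigen_ipower(const nb_sparse_t *A, uint32_t N)
{
    int h = 3; /* number of eigenpairs to approximate */
    double *eigenvals = nb_allocate_zero_mem(h * sizeof(double));
    double **eigenvecs = nb_allocate_zero_mem(h * sizeof(double*));
    int *iters = nb_allocate_zero_mem(h * sizeof(int));
    for (int i = 0; i < h; i++)
        eigenvecs[i] = nb_allocate_zero_mem(N * sizeof(double));

    nb_sparse_eigen_ipower(A, NB_SOLVER_CHK, h, 0.0 /* mu */,
                           eigenvecs, eigenvals, iters,
                           1e-8 /* tolerance */, 4 /* threads */);

    for (int i = 0; i < h; i++) {
        printf("lambda[%d] ~ %e (%d iterations)\n",
               i, eigenvals[i], iters[i]);
        nb_free_mem(eigenvecs[i]);
    }
    nb_free_mem(eigenvecs);
    nb_free_mem(eigenvals);
    nb_free_mem(iters);
}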
GB_unaryop__abs_int32_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_uint32 // op(A') function: GB_tran__abs_int32_uint32 // C type: int32_t // A type: uint32_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_uint32 ( int32_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
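A hand-expanded sketch of what the macro machinery above reduces to, for readers tracing the generated code. GB_IABS is defined elsewhere in GraphBLAS; writing it as a plain conditional below is an assumption about its definition, not a quote of it.

/* Equivalent plain-C form of the loop inside GB_unop__abs_int32_uint32
 * after GB_GETA / GB_CASTING / GB_OP are substituted (the OpenMP pragma
 * and the GB_DISABLE guard are omitted for brevity). */
static void unop_abs_int32_uint32_expanded
(
    int32_t *Cx, const uint32_t *Ax, int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;         /* GB_GETA (aij, Ax, pA)    */
        int32_t x = (int32_t) aij ;     /* GB_CASTING (x, aij)      */
        Cx [p] = (x >= 0) ? x : (-x) ;  /* GB_OP: cij = GB_IABS (x) */
    }
}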
new_torord_fkatp_endo.c
#include "new_torord_fkatp_endo.h" #include <stdlib.h> real max_step; real min_step; real abstol; real reltol; bool adpt; real *ode_dt, *ode_previous_dt, *ode_time_new; GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; //for count and m } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { log_to_stdout_and_file("Using New_ToRORd_fkatp_endo CPU model\n"); uint32_t num_cells = solver->original_num_cells; solver->sv = (real*)malloc(NEQ*num_cells*sizeof(real)); max_step = solver->max_dt; min_step = solver->min_dt; abstol = solver->abs_tol; reltol = solver->rel_tol; adpt = solver->adaptive; if(adpt) { ode_dt = (real*)malloc(num_cells*sizeof(real)); OMP(parallel for) for(int i = 0; i < num_cells; i++) { ode_dt[i] = solver->min_dt; } ode_previous_dt = (real*)calloc(num_cells, sizeof(real)); ode_time_new = (real*)calloc(num_cells, sizeof(real)); log_to_stdout_and_file("Using Adaptive Euler model to solve the ODEs\n"); } else { log_to_stdout_and_file("Using Euler model to solve the ODEs\n"); } OMP(parallel for) for(uint32_t i = 0; i < num_cells; i++) { real *sv = &solver->sv[i * NEQ]; sv[0] = -88.7638; sv[1] = 0.0111; sv[2] = 7.0305e-5; sv[3] = 12.1025; sv[4] = 12.1029; sv[5] = 142.3002; sv[6] = 142.3002; sv[7] = 1.5211; sv[8] = 1.5214; sv[9] = 8.1583e-05; sv[10] = 8.0572e-4; sv[11] = 0.8286; sv[12] = 0.8284; sv[13] = 0.6707; sv[14] = 0.8281; sv[15] = 1.629e-4; sv[16] = 0.5255; sv[17] = 0.2872; sv[18] = 9.5098e-4; sv[19] = 0.9996; sv[20] = 0.5936; sv[21] = 4.8454e-4; sv[22] = 0.9996; sv[23] = 0.6538; sv[24] = 8.1084e-9; sv[25] = 1.0; sv[26] = 0.939; sv[27] = 1.0; sv[28] = 0.9999; sv[29] = 1.0; sv[30] = 1.0; sv[31] = 1.0; sv[32] = 6.6462e-4; sv[33] = 0.0012; sv[34] = 7.0344e-4; sv[35] = 8.5109e-4; sv[36] = 0.9981; sv[37] = 1.3289e-5; sv[38] = 3.7585e-4; sv[39] = 0.248; sv[40] = 1.7707e-4; sv[41] = 1.6129e-22; sv[42] = 1.2475e-20; } } SOLVE_MODEL_ODES(solve_model_odes_cpu) { uint32_t sv_id; size_t num_cells_to_solve = ode_solver->num_cells_to_solve; uint32_t * cells_to_solve = ode_solver->cells_to_solve; real *sv = ode_solver->sv; real dt = ode_solver->min_dt; uint32_t num_steps = ode_solver->num_steps; #pragma omp parallel for private(sv_id) for (u_int32_t i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; if(adpt) { solve_forward_euler_cpu_adpt(sv + (sv_id * NEQ), stim_currents[i], current_t + dt, sv_id); } else { for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = dt*rDY[i] + rY[i]; } void solve_forward_euler_cpu_adpt(real *sv, real stim_curr, real final_time, int sv_id) { const real _beta_safety_ = 0.8; int numEDO = NEQ; real rDY[numEDO]; real _tolerances_[numEDO]; real _aux_tol = 0.0; //initializes the variables ode_previous_dt[sv_id] = ode_dt[sv_id]; real edos_old_aux_[numEDO]; real edos_new_euler_[numEDO]; real *_k1__ = (real*) malloc(sizeof(real)*numEDO); real *_k2__ = (real*) malloc(sizeof(real)*numEDO); real *_k_aux__; real *dt = &ode_dt[sv_id]; real *time_new = &ode_time_new[sv_id]; real *previous_dt = &ode_previous_dt[sv_id]; if(*time_new + *dt > final_time) { *dt = final_time - *time_new; } RHS_cpu(sv, rDY, stim_curr, *dt); *time_new += *dt; for(int i = 0; i < 
numEDO; i++){ _k1__[i] = rDY[i]; } const double __tiny_ = pow(abstol, 2.0); int count = 0; int count_limit = (final_time - *time_new)/min_step; int aux_count_limit = count_limit+2000000; if(aux_count_limit > 0) { count_limit = aux_count_limit; } while(1) { for(int i = 0; i < numEDO; i++) { //stores the old variables in a vector edos_old_aux_[i] = sv[i]; //computes euler method edos_new_euler_[i] = _k1__[i] * *dt + edos_old_aux_[i]; //steps ahead to compute the rk2 method sv[i] = edos_new_euler_[i]; } *time_new += *dt; RHS_cpu(sv, rDY, stim_curr, *dt); *time_new -= *dt;//step back double greatestError = 0.0, auxError = 0.0; for(int i = 0; i < numEDO; i++) { //stores the new evaluation _k2__[i] = rDY[i]; _aux_tol = fabs(edos_new_euler_[i])*reltol; _tolerances_[i] = (abstol > _aux_tol )?abstol:_aux_tol; //finds the greatest error between the steps auxError = fabs(( (*dt/2.0)*(_k1__[i] - _k2__[i])) / _tolerances_[i]); greatestError = (auxError > greatestError) ? auxError : greatestError; } ///adapt the time step greatestError += __tiny_; *previous_dt = *dt; ///adapt the time step *dt = _beta_safety_ * (*dt) * sqrt(1.0f/greatestError); if (*time_new + *dt > final_time) { *dt = final_time - *time_new; } //it doesn't accept the solution if ( count < count_limit && (greatestError >= 1.0f)) { //restore the old values to do it again for(int i = 0; i < numEDO; i++) { sv[i] = edos_old_aux_[i]; } count++; //throw the results away and compute again } else{//it accepts the solutions if(greatestError >=1.0) { printf("Accepting solution with error > %lf \n", greatestError); } //printf("%e %e\n", _ode->time_new, edos_new_euler_[0]); if (*dt < min_step) { *dt = min_step; } else if (*dt > max_step && max_step != 0) { *dt = max_step; } if (*time_new + *dt > final_time) { *dt = final_time - *time_new; } _k_aux__ = _k2__; _k2__ = _k1__; _k1__ = _k_aux__; //it steps the method ahead, with euler solution for(int i = 0; i < numEDO; i++){ sv[i] = edos_new_euler_[i]; } if(*time_new + *previous_dt >= final_time){ if((fabs(final_time - *time_new) < 1.0e-5) ){ break; }else if(*time_new < final_time){ *dt = *previous_dt = final_time - *time_new; *time_new += *previous_dt; break; }else{ printf("Error: time_new %.20lf final_time %.20lf diff %e \n", *time_new , final_time, fabs(final_time - *time_new) ); break; } }else{ *time_new += *previous_dt; } } } free(_k1__); free(_k2__); } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // Get the stimulus current from the current cell real calc_I_stim = stim_current; //State variables real STATES[NEQ]; for (uint32_t i = 0; i < NEQ; i++) STATES[i] = sv[i]; #include "new_torord_fkatp_endo_common.inc.c" }
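A reduced sketch, not part of the model file, of the step-size controller that solve_forward_euler_cpu_adpt() applies per state variable, shown for a single scalar ODE y' = f(t, y). The Euler slope k1 and the slope k2 re-evaluated at the predicted point form an embedded pair whose difference drives dt <- beta * dt * sqrt(1/err), the same update used above; the count_limit guard and the min/max step clamping of the real solver are omitted here.

#include <math.h>

/* One accepted step of the adaptive forward-Euler scheme; advances *t
 * and *dt in place and returns the new state. */
static double adaptive_euler_step(double (*f)(double, double),
                                  double *t, double y, double *dt,
                                  double abstol, double reltol)
{
    const double beta = 0.8; /* same safety factor as _beta_safety_ above */
    for (;;) {
        double k1 = f(*t, y);
        double y_pred = y + k1 * (*dt);   /* forward Euler predictor */
        double k2 = f(*t + *dt, y_pred);  /* slope at the predicted point */
        double tol = fmax(abstol, fabs(y_pred) * reltol);
        double err = fabs((*dt / 2.0) * (k1 - k2)) / tol
                     + pow(abstol, 2.0);  /* the __tiny_ regularizer */
        double dt_used = *dt;
        *dt = beta * dt_used * sqrt(1.0 / err);
        if (err < 1.0) {       /* accept: advance time, keep the state */
            *t += dt_used;
            return y_pred;
        }
        /* reject: retry with the smaller *dt (the real solver also caps
         * the number of retries and clamps *dt to [min_step, max_step]) */
    }
}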
prior_box_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/transform.h" #include "paddle/pten/kernels/funcs/math_function.h" namespace paddle { namespace operators { constexpr int kPriorBoxFLOAT = 1; constexpr int kPriorBoxDOUBLE = 2; inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior, bool flip, std::vector<float>* output_aspect_ratior) { constexpr float epsilon = 1e-6; output_aspect_ratior->clear(); output_aspect_ratior->push_back(1.0f); for (size_t i = 0; i < input_aspect_ratior.size(); ++i) { float ar = input_aspect_ratior[i]; bool already_exist = false; for (size_t j = 0; j < output_aspect_ratior->size(); ++j) { if (fabs(ar - output_aspect_ratior->at(j)) < epsilon) { already_exist = true; break; } } if (!already_exist) { output_aspect_ratior->push_back(ar); if (flip) { output_aspect_ratior->push_back(1.0f / ar); } } } } template <typename T, typename K> class PriorBoxOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes"); auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes"); auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto flip = ctx.Attr<bool>("flip"); auto clip = ctx.Attr<bool>("clip"); auto min_max_aspect_ratios_order = ctx.Attr<bool>("min_max_aspect_ratios_order"); std::vector<float> aspect_ratios; ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); K step_w = static_cast<K>(ctx.Attr<float>("step_w")); K step_h = static_cast<K>(ctx.Attr<float>("step_h")); K offset = static_cast<K>(ctx.Attr<float>("offset")); auto img_width = image->dims()[3]; auto img_height = image->dims()[2]; auto feature_width = input->dims()[3]; auto feature_height = input->dims()[2]; K step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<K>(img_width) / feature_width; step_height = static_cast<K>(img_height) / feature_height; } else { step_width = step_w; step_height = step_h; } int num_priors = aspect_ratios.size() * min_sizes.size(); if (max_sizes.size() > 0) { num_priors += max_sizes.size(); } boxes->mutable_data<K>(ctx.GetPlace()); vars->mutable_data<K>(ctx.GetPlace()); K* b_t = boxes->data<K>(); for (int h = 0; h < feature_height; ++h) { for (int w = 0; w < feature_width; ++w) { K center_x = (w + offset) * step_width; K center_y = (h + offset) * step_height; K box_width, box_height; for (size_t s = 0; s < min_sizes.size(); ++s) { auto min_size = min_sizes[s]; if (min_max_aspect_ratios_order) { box_width = box_height = min_size / 2.; 
b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; if (max_sizes.size() > 0) { auto max_size = max_sizes[s]; // square prior with size sqrt(minSize * maxSize) box_width = box_height = sqrt(min_size * max_size) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } // priors with different aspect ratios for (size_t r = 0; r < aspect_ratios.size(); ++r) { float ar = aspect_ratios[r]; if (fabs(ar - 1.) < 1e-6) { continue; } box_width = min_size * sqrt(ar) / 2.; box_height = min_size / sqrt(ar) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } } else { // priors with different aspect ratios for (size_t r = 0; r < aspect_ratios.size(); ++r) { float ar = aspect_ratios[r]; box_width = min_size * sqrt(ar) / 2.; box_height = min_size / sqrt(ar) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } if (max_sizes.size() > 0) { auto max_size = max_sizes[s]; // square prior with size sqrt(minSize * maxSize) box_width = box_height = sqrt(min_size * max_size) / 2.; b_t[0] = (center_x - box_width) / img_width; b_t[1] = (center_y - box_height) / img_height; b_t[2] = (center_x + box_width) / img_width; b_t[3] = (center_y + box_height) / img_height; b_t += 4; } } } } } if (clip) { K* dt = boxes->data<K>(); std::transform(dt, dt + boxes->numel(), dt, [](K v) -> K { return std::min<K>(std::max<K>(v, 0.), 1.); }); } framework::Tensor var_t; var_t.mutable_data<K>( framework::make_ddim({1, static_cast<int>(variances.size())}), ctx.GetPlace()); auto var_et = framework::EigenTensor<K, 2>::From(var_t); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (size_t i = 0; i < variances.size(); ++i) { var_et(0, i) = variances[i]; } int box_num = feature_height * feature_width * num_priors; auto var_dim = vars->dims(); vars->Resize({box_num, static_cast<int>(variances.size())}); auto e_vars = framework::EigenMatrix<K, Eigen::RowMajor>::From(*vars); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int i = 0; i < box_num; ++i) { for (size_t j = 0; j < variances.size(); ++j) { e_vars(i, j) = variances[j]; } } vars->Resize(var_dim); } }; // namespace operators } // namespace operators } // namespace paddle
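For reference, the per-prior geometry that PriorBoxOpKernel writes into b_t[0..3], factored into a small standalone helper; this is an illustrative restatement of the arithmetic above, not part of the operator. Each box is the normalized (xmin, ymin, xmax, ymax) of a min_size anchor stretched by aspect ratio ar around the cell center.

#include <math.h>

/* Corners of one prior box, normalized to [0, 1] by the image size,
 * exactly as the kernel fills b_t[0..3]. */
static void prior_box_corners(float center_x, float center_y,
                              float min_size, float ar,
                              float img_width, float img_height,
                              float box[4])
{
    float box_width  = min_size * sqrtf(ar) / 2.f;  /* half extent in x */
    float box_height = min_size / sqrtf(ar) / 2.f;  /* half extent in y */
    box[0] = (center_x - box_width)  / img_width;   /* xmin */
    box[1] = (center_y - box_height) / img_height;  /* ymin */
    box[2] = (center_x + box_width)  / img_width;   /* xmax */
    box[3] = (center_y + box_height) / img_height;  /* ymax */
}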
C_pp.c
// this is autogenerated file, do not edit it. #include "ficus/ficus.h" struct _fx_N14K_form__ktyp_t_data_t; static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst); struct _fx_N14C_form__ctyp_t_data_t; static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst); struct _fx_N14C_form__cexp_t_data_t; static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst); struct _fx_N15C_form__cstmt_t_data_t; static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst); typedef struct _fx_LS_data_t { int_ rc; struct _fx_LS_data_t* tl; fx_str_t hd; } _fx_LS_data_t, *_fx_LS; typedef struct _fx_N17Options__optval_t { int tag; union { bool OptBool; int_ OptInt; fx_str_t OptString; } u; } _fx_N17Options__optval_t; typedef struct _fx_T2SN17Options__optval_t { fx_str_t t0; struct _fx_N17Options__optval_t t1; } _fx_T2SN17Options__optval_t; typedef struct _fx_LT2SN17Options__optval_t_data_t { int_ rc; struct _fx_LT2SN17Options__optval_t_data_t* tl; struct _fx_T2SN17Options__optval_t hd; } _fx_LT2SN17Options__optval_t_data_t, *_fx_LT2SN17Options__optval_t; typedef struct _fx_R18Options__options_t { struct _fx_LS_data_t* app_args; fx_str_t app_filename; bool arch64; bool force_rebuild; fx_str_t build_dir; fx_str_t build_rootdir; fx_str_t cflags; fx_str_t clibs; bool compile_by_cpp; fx_str_t filename; bool gen_c; struct _fx_LS_data_t* include_path; bool debug; struct _fx_LT2SN17Options__optval_t_data_t* defines; int_ optim_iters; int_ inline_thresh; bool enable_openmp; bool relax; bool use_preamble; bool make_app; int_ optimize_level; fx_str_t output_name; bool print_ast0; bool print_ast; bool print_k0; bool print_k; bool print_tokens; bool run_app; bool verbose; bool W_unused; } _fx_R18Options__options_t; typedef struct _fx_Ta2i { int_ t0; int_ t1; } _fx_Ta2i; typedef struct _fx_T2Ta2iS { struct _fx_Ta2i t0; fx_str_t t1; } _fx_T2Ta2iS; typedef struct _fx_R9Ast__id_t { int_ m; int_ i; int_ j; } _fx_R9Ast__id_t; typedef struct _fx_R10Ast__loc_t { int_ m_idx; int_ line0; int_ col0; int_ line1; int_ col1; } _fx_R10Ast__loc_t; typedef struct _fx_T2R9Ast__id_ti { struct _fx_R9Ast__id_t t0; int_ t1; } _fx_T2R9Ast__id_ti; typedef struct _fx_T2Bi { bool t0; int_ t1; } _fx_T2Bi; typedef struct _fx_N12Ast__scope_t { int tag; union { int_ ScBlock; struct _fx_T2Bi ScLoop; int_ ScFold; int_ ScArrMap; int_ ScMap; int_ ScTry; struct _fx_R9Ast__id_t ScFun; struct _fx_R9Ast__id_t ScClass; struct _fx_R9Ast__id_t ScInterface; int_ ScModule; } u; } _fx_N12Ast__scope_t; typedef struct _fx_LN12Ast__scope_t_data_t { int_ rc; struct _fx_LN12Ast__scope_t_data_t* tl; struct _fx_N12Ast__scope_t hd; } _fx_LN12Ast__scope_t_data_t, *_fx_LN12Ast__scope_t; typedef struct _fx_R16Ast__val_flags_t { bool val_flag_arg; bool val_flag_mutable; bool val_flag_temp; bool val_flag_tempref; bool val_flag_private; bool val_flag_subarray; bool val_flag_instance; struct _fx_T2R9Ast__id_ti val_flag_method; int_ val_flag_ctor; struct _fx_LN12Ast__scope_t_data_t* val_flag_global; } _fx_R16Ast__val_flags_t; typedef struct _fx_R17C_form__cdefval_t { struct _fx_R9Ast__id_t cv_name; struct _fx_N14C_form__ctyp_t_data_t* cv_typ; fx_str_t cv_cname; struct _fx_R16Ast__val_flags_t cv_flags; struct _fx_R10Ast__loc_t cv_loc; } _fx_R17C_form__cdefval_t; typedef struct _fx_R19C_form__cdeflabel_t { struct _fx_R9Ast__id_t cl_name; fx_str_t cl_cname; struct _fx_R10Ast__loc_t cl_loc; } _fx_R19C_form__cdeflabel_t; typedef struct _fx_T2R9Ast__id_tN14C_form__ctyp_t { struct _fx_R9Ast__id_t 
t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14C_form__ctyp_t hd; } _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t, *_fx_LT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_R23C_form__cdefinterface_t { struct _fx_R9Ast__id_t ci_name; fx_str_t ci_cname; struct _fx_R9Ast__id_t ci_id; struct _fx_R9Ast__id_t ci_vtbl; struct _fx_R9Ast__id_t ci_base; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* ci_all_methods; struct _fx_LN12Ast__scope_t_data_t* ci_scope; struct _fx_R10Ast__loc_t ci_loc; } _fx_R23C_form__cdefinterface_t; typedef struct _fx_rR23C_form__cdefinterface_t_data_t { int_ rc; struct _fx_R23C_form__cdefinterface_t data; } _fx_rR23C_form__cdefinterface_t_data_t, *_fx_rR23C_form__cdefinterface_t; typedef struct _fx_N17Ast__fun_constr_t { int tag; union { int_ CtorVariant; struct _fx_R9Ast__id_t CtorFP; struct _fx_R9Ast__id_t CtorExn; } u; } _fx_N17Ast__fun_constr_t; typedef struct _fx_R16Ast__fun_flags_t { int_ fun_flag_pure; bool fun_flag_ccode; bool fun_flag_have_keywords; bool fun_flag_inline; bool fun_flag_nothrow; bool fun_flag_really_nothrow; bool fun_flag_private; struct _fx_N17Ast__fun_constr_t fun_flag_ctor; struct _fx_R9Ast__id_t fun_flag_method_of; bool fun_flag_uses_fv; bool fun_flag_recursive; bool fun_flag_instance; } _fx_R16Ast__fun_flags_t; typedef struct _fx_LN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LN15C_form__cstmt_t_data_t* tl; struct _fx_N15C_form__cstmt_t_data_t* hd; } _fx_LN15C_form__cstmt_t_data_t, *_fx_LN15C_form__cstmt_t; typedef struct _fx_N19C_form__carg_attr_t { int tag; } _fx_N19C_form__carg_attr_t; typedef struct _fx_LN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LN19C_form__carg_attr_t_data_t* tl; struct _fx_N19C_form__carg_attr_t hd; } _fx_LN19C_form__carg_attr_t_data_t, *_fx_LN19C_form__carg_attr_t; typedef struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_LN19C_form__carg_attr_t_data_t* t2; } _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl; struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t hd; } _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t, *_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_R17C_form__cdeffun_t { struct _fx_R9Ast__id_t cf_name; fx_str_t cf_cname; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* cf_args; struct _fx_N14C_form__ctyp_t_data_t* cf_rt; struct _fx_LN15C_form__cstmt_t_data_t* cf_body; struct _fx_R16Ast__fun_flags_t cf_flags; struct _fx_LN12Ast__scope_t_data_t* cf_scope; struct _fx_R10Ast__loc_t cf_loc; } _fx_R17C_form__cdeffun_t; typedef struct _fx_rR17C_form__cdeffun_t_data_t { int_ rc; struct _fx_R17C_form__cdeffun_t data; } _fx_rR17C_form__cdeffun_t_data_t, *_fx_rR17C_form__cdeffun_t; typedef struct _fx_Ta2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; } _fx_Ta2R9Ast__id_t; typedef struct _fx_LR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* tl; struct _fx_R9Ast__id_t hd; } _fx_LR9Ast__id_t_data_t, *_fx_LR9Ast__id_t; typedef struct _fx_R17C_form__ctprops_t { bool ctp_scalar; bool ctp_complex; bool ctp_ptr; bool ctp_pass_by_ref; struct 
_fx_LR9Ast__id_t_data_t* ctp_make; struct _fx_Ta2R9Ast__id_t ctp_free; struct _fx_Ta2R9Ast__id_t ctp_copy; } _fx_R17C_form__ctprops_t; typedef struct _fx_R17C_form__cdeftyp_t { struct _fx_R9Ast__id_t ct_name; struct _fx_N14C_form__ctyp_t_data_t* ct_typ; fx_str_t ct_cname; struct _fx_R17C_form__ctprops_t ct_props; int_ ct_data_start; struct _fx_R9Ast__id_t ct_enum; struct _fx_LR9Ast__id_t_data_t* ct_ifaces; struct _fx_R9Ast__id_t ct_ifaces_id; struct _fx_LN12Ast__scope_t_data_t* ct_scope; struct _fx_R10Ast__loc_t ct_loc; } _fx_R17C_form__cdeftyp_t; typedef struct _fx_rR17C_form__cdeftyp_t_data_t { int_ rc; struct _fx_R17C_form__cdeftyp_t data; } _fx_rR17C_form__cdeftyp_t_data_t, *_fx_rR17C_form__cdeftyp_t; typedef struct _fx_Nt6option1N14C_form__cexp_t { int tag; union { struct _fx_N14C_form__cexp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__cexp_t; typedef struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t { struct _fx_R9Ast__id_t t0; struct _fx_Nt6option1N14C_form__cexp_t t1; } _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl; struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t hd; } _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t, *_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_R18C_form__cdefenum_t { struct _fx_R9Ast__id_t cenum_name; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* cenum_members; fx_str_t cenum_cname; struct _fx_LN12Ast__scope_t_data_t* cenum_scope; struct _fx_R10Ast__loc_t cenum_loc; } _fx_R18C_form__cdefenum_t; typedef struct _fx_rR18C_form__cdefenum_t_data_t { int_ rc; struct _fx_R18C_form__cdefenum_t data; } _fx_rR18C_form__cdefenum_t_data_t, *_fx_rR18C_form__cdefenum_t; typedef struct _fx_R19C_form__cdefmacro_t { struct _fx_R9Ast__id_t cm_name; fx_str_t cm_cname; struct _fx_LR9Ast__id_t_data_t* cm_args; struct _fx_LN15C_form__cstmt_t_data_t* cm_body; struct _fx_LN12Ast__scope_t_data_t* cm_scope; struct _fx_R10Ast__loc_t cm_loc; } _fx_R19C_form__cdefmacro_t; typedef struct _fx_rR19C_form__cdefmacro_t_data_t { int_ rc; struct _fx_R19C_form__cdefmacro_t data; } _fx_rR19C_form__cdefmacro_t_data_t, *_fx_rR19C_form__cdefmacro_t; typedef struct _fx_R17C_form__cdefexn_t { struct _fx_R9Ast__id_t cexn_name; fx_str_t cexn_cname; fx_str_t cexn_base_cname; struct _fx_N14C_form__ctyp_t_data_t* cexn_typ; bool cexn_std; struct _fx_R9Ast__id_t cexn_tag; struct _fx_R9Ast__id_t cexn_data; struct _fx_R9Ast__id_t cexn_info; struct _fx_R9Ast__id_t cexn_make; struct _fx_LN12Ast__scope_t_data_t* cexn_scope; struct _fx_R10Ast__loc_t cexn_loc; } _fx_R17C_form__cdefexn_t; typedef struct _fx_rR17C_form__cdefexn_t_data_t { int_ rc; struct _fx_R17C_form__cdefexn_t data; } _fx_rR17C_form__cdefexn_t_data_t, *_fx_rR17C_form__cdefexn_t; typedef struct _fx_N15C_form__cinfo_t { int tag; union { struct _fx_R17C_form__cdefval_t CVal; struct _fx_rR17C_form__cdeffun_t_data_t* CFun; struct _fx_rR17C_form__cdeftyp_t_data_t* CTyp; struct _fx_rR17C_form__cdefexn_t_data_t* CExn; struct _fx_rR23C_form__cdefinterface_t_data_t* CInterface; struct _fx_rR18C_form__cdefenum_t_data_t* CEnum; struct _fx_R19C_form__cdeflabel_t CLabel; struct _fx_rR19C_form__cdefmacro_t_data_t* CMacro; } u; } _fx_N15C_form__cinfo_t; typedef struct _fx_T2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t { int_ rc; 
struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14K_form__ktyp_t hd; } _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t, *_fx_LT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_LN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LN14K_form__ktyp_t_data_t* tl; struct _fx_N14K_form__ktyp_t_data_t* hd; } _fx_LN14K_form__ktyp_t_data_t, *_fx_LN14K_form__ktyp_t; typedef struct _fx_T2R10Ast__loc_tS { struct _fx_R10Ast__loc_t t0; fx_str_t t1; } _fx_T2R10Ast__loc_tS; typedef struct _fx_N13PP__ppstyle_t { int tag; } _fx_N13PP__ppstyle_t; typedef struct _fx_T3iiC { int_ t0; int_ t1; char_ t2; } _fx_T3iiC; typedef struct _fx_T2iN13PP__ppstyle_t { int_ t0; struct _fx_N13PP__ppstyle_t t1; } _fx_T2iN13PP__ppstyle_t; typedef struct _fx_N11PP__pptok_t { int tag; union { fx_str_t PPString; struct _fx_T3iiC PPBreak; struct _fx_T2iN13PP__ppstyle_t PPBegin; } u; } _fx_N11PP__pptok_t; typedef struct _fx_T2N11PP__pptok_ti { struct _fx_N11PP__pptok_t t0; int_ t1; } _fx_T2N11PP__pptok_ti; typedef struct _fx_R11PP__state_t { int_ space; int_ left; int_ right; int_ top; int_ bottom; int_ lefttotal; int_ righttotal; fx_arr_t q; fx_arr_t stack; fx_arr_t pp_stack; int_ pp_top; bool emptystack; } _fx_R11PP__state_t; typedef struct _fx_FPv1S { int (*fp)(fx_str_t*, void*); fx_fcv_t* fcv; } _fx_FPv1S; typedef struct _fx_FPLS0 { int (*fp)(struct _fx_LS_data_t**, void*); fx_fcv_t* fcv; } _fx_FPLS0; typedef struct _fx_rR11PP__state_t_data_t { int_ rc; struct _fx_R11PP__state_t data; } _fx_rR11PP__state_t_data_t, *_fx_rR11PP__state_t; typedef struct _fx_R5PP__t { int_ margin; int_ default_indent; struct _fx_FPv1S print_f; struct _fx_FPLS0 get_f; struct _fx_rR11PP__state_t_data_t* r; } _fx_R5PP__t; typedef struct _fx_T2il { int_ t0; int64_t t1; } _fx_T2il; typedef struct _fx_T2iq { int_ t0; uint64_t t1; } _fx_T2iq; typedef struct _fx_T2id { int_ t0; double t1; } _fx_T2id; typedef struct _fx_N14K_form__klit_t { int tag; union { int64_t KLitInt; struct _fx_T2il KLitSInt; struct _fx_T2iq KLitUInt; struct _fx_T2id KLitFloat; fx_str_t KLitString; char_ KLitChar; bool KLitBool; struct _fx_N14K_form__ktyp_t_data_t* KLitNil; } u; } _fx_N14K_form__klit_t; typedef struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t { struct _fx_LN14K_form__ktyp_t_data_t* t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_T2iN14K_form__ktyp_t { int_ t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2iN14K_form__ktyp_t; typedef struct _fx_N14K_form__ktyp_t_data_t { int_ rc; int tag; union { int_ KTypSInt; int_ KTypUInt; int_ KTypFloat; struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t KTypFun; struct _fx_LN14K_form__ktyp_t_data_t* KTypTuple; struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t KTypRecord; struct _fx_R9Ast__id_t KTypName; struct _fx_T2iN14K_form__ktyp_t KTypArray; struct _fx_N14K_form__ktyp_t_data_t* KTypVector; struct _fx_N14K_form__ktyp_t_data_t* KTypList; struct _fx_N14K_form__ktyp_t_data_t* KTypRef; } u; } _fx_N14K_form__ktyp_t_data_t, *_fx_N14K_form__ktyp_t; typedef struct _fx_Nt6option1N14C_form__ctyp_t { int tag; union { struct _fx_N14C_form__ctyp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__ctyp_t; typedef struct _fx_Nt6option1R9Ast__id_t { int tag; union { struct _fx_R9Ast__id_t Some; } u; } _fx_Nt6option1R9Ast__id_t; typedef struct _fx_N12Ast__cmpop_t 
{ int tag; } _fx_N12Ast__cmpop_t; typedef struct _fx_T2R9Ast__id_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2R9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2SR10Ast__loc_t { fx_str_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2SR10Ast__loc_t; typedef struct _fx_N17C_form__cbinary_t { int tag; union { struct _fx_N12Ast__cmpop_t COpCmp; } u; } _fx_N17C_form__cbinary_t; typedef struct _fx_N16C_form__cunary_t { int tag; } _fx_N16C_form__cunary_t; typedef struct _fx_N19C_form__ctyp_attr_t { int tag; } _fx_N19C_form__ctyp_attr_t; typedef struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1; } _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LN14C_form__ctyp_t_data_t* tl; struct _fx_N14C_form__ctyp_t_data_t* hd; } _fx_LN14C_form__ctyp_t_data_t, *_fx_LN14C_form__ctyp_t; typedef struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t { struct _fx_LN14C_form__ctyp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t; typedef struct _fx_LN19C_form__ctyp_attr_t_data_t { int_ rc; struct _fx_LN19C_form__ctyp_attr_t_data_t* tl; struct _fx_N19C_form__ctyp_attr_t hd; } _fx_LN19C_form__ctyp_attr_t_data_t, *_fx_LN19C_form__ctyp_attr_t; typedef struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t { struct _fx_LN19C_form__ctyp_attr_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t; typedef struct _fx_T2iN14C_form__ctyp_t { int_ t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2iN14C_form__ctyp_t; typedef struct _fx_N14C_form__ctyp_t_data_t { int_ rc; int tag; union { int_ CTypSInt; int_ CTypUInt; int_ CTypFloat; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypStruct; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypUnion; struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t CTypFunRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawArray; struct _fx_T2iN14C_form__ctyp_t CTypArray; struct _fx_N14C_form__ctyp_t_data_t* CTypVector; struct _fx_R9Ast__id_t CTypName; } u; } _fx_N14C_form__ctyp_t_data_t, *_fx_N14C_form__ctyp_t; typedef struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14K_form__klit_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N17C_form__cbinary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N16C_form__cunary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } 
_fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_LN14C_form__cexp_t_data_t { int_ rc; struct _fx_LN14C_form__cexp_t_data_t* tl; struct _fx_N14C_form__cexp_t_data_t* hd; } _fx_LN14C_form__cexp_t_data_t, *_fx_LN14C_form__cexp_t; typedef struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_LN14C_form__cexp_t_data_t* t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_N14C_form__cexp_t_data_t { int_ rc; int tag; union { struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpIdent; struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t CExpLit; struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpBinary; struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpUnary; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpMem; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpArrow; struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t CExpCast; struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpTernary; struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpCall; struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpInit; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t CExpTyp; struct _fx_T2SR10Ast__loc_t CExpCCode; } u; } _fx_N14C_form__cexp_t_data_t, *_fx_N14C_form__cexp_t; typedef struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__cexp_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LN15C_form__cstmt_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN15C_form__cstmt_t { struct _fx_R9Ast__id_t t0; struct _fx_N15C_form__cstmt_t_data_t* t1; } _fx_T2R9Ast__id_tN15C_form__cstmt_t; typedef struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_N15C_form__cstmt_t_data_t* t2; struct _fx_R10Ast__loc_t t3; } 
_fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__ctyp_t t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_LN14C_form__cexp_t_data_t* t3; struct _fx_N15C_form__cstmt_t_data_t* t4; struct _fx_R10Ast__loc_t t5; } _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t { struct _fx_N15C_form__cstmt_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_LN14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_N15C_form__cstmt_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t CStmtNop; struct _fx_T2SR10Ast__loc_t CComment; struct _fx_N14C_form__cexp_t_data_t* CExp; struct _fx_R10Ast__loc_t CStmtBreak; struct _fx_R10Ast__loc_t CStmtContinue; struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t CStmtReturn; struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t CStmtBlock; struct _fx_T2R9Ast__id_tN15C_form__cstmt_t CStmtSync; struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t CStmtIf; struct _fx_T2R9Ast__id_tR10Ast__loc_t 
CStmtGoto; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtLabel; struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtFor; struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtWhile; struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t CStmtDoWhile; struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t CStmtSwitch; struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t CDefVal; struct _fx_rR17C_form__cdeffun_t_data_t* CDefFun; struct _fx_rR17C_form__cdeftyp_t_data_t* CDefTyp; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardSym; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardTyp; struct _fx_rR18C_form__cdefenum_t_data_t* CDefEnum; struct _fx_rR23C_form__cdefinterface_t_data_t* CDefInterface; struct _fx_rR19C_form__cdefmacro_t_data_t* CMacroDef; struct _fx_T2R9Ast__id_tR10Ast__loc_t CMacroUndef; struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t CMacroIf; struct _fx_T2SR10Ast__loc_t CMacroInclude; struct _fx_T2SR10Ast__loc_t CMacroPragma; } u; } _fx_N15C_form__cstmt_t_data_t, *_fx_N15C_form__cstmt_t; typedef struct _fx_N13C_pp__assoc_t { int tag; } _fx_N13C_pp__assoc_t; typedef struct _fx_T3SiN13C_pp__assoc_t { fx_str_t t0; int_ t1; struct _fx_N13C_pp__assoc_t t2; } _fx_T3SiN13C_pp__assoc_t; typedef struct { int_ rc; int_ data; } _fx_E4Exit_data_t; typedef struct { int_ rc; fx_str_t data; } _fx_E4Fail_data_t; typedef struct { int_ rc; struct _fx_T2Ta2iS data; } _fx_E22LexerUtils__LexerError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E17Ast__CompileError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E18Parser__ParseError_data_t; static void _fx_free_LS(struct _fx_LS_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LS, fx_free_str); } static int _fx_cons_LS(fx_str_t* hd, struct _fx_LS_data_t* tl, bool addref_tl, struct _fx_LS_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LS, fx_copy_str); } static void _fx_free_N17Options__optval_t(struct _fx_N17Options__optval_t* dst) { switch (dst->tag) { case 3: fx_free_str(&dst->u.OptString); break; default: ; } dst->tag = 0; } static void _fx_copy_N17Options__optval_t(struct _fx_N17Options__optval_t* src, struct _fx_N17Options__optval_t* dst) { dst->tag = src->tag; switch (src->tag) { case 3: fx_copy_str(&src->u.OptString, &dst->u.OptString); break; default: dst->u = src->u; } } static void _fx_free_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* dst) { fx_free_str(&dst->t0); _fx_free_N17Options__optval_t(&dst->t1); } static void _fx_copy_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* src, struct _fx_T2SN17Options__optval_t* dst) { fx_copy_str(&src->t0, &dst->t0); _fx_copy_N17Options__optval_t(&src->t1, &dst->t1); } static void _fx_make_T2SN17Options__optval_t( fx_str_t* t0, struct _fx_N17Options__optval_t* t1, struct _fx_T2SN17Options__optval_t* fx_result) { fx_copy_str(t0, &fx_result->t0); _fx_copy_N17Options__optval_t(t1, &fx_result->t1); } static void _fx_free_LT2SN17Options__optval_t(struct _fx_LT2SN17Options__optval_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_free_T2SN17Options__optval_t); } static int _fx_cons_LT2SN17Options__optval_t( struct _fx_T2SN17Options__optval_t* hd, struct _fx_LT2SN17Options__optval_t_data_t* tl, bool addref_tl, struct _fx_LT2SN17Options__optval_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SN17Options__optval_t, 
_fx_copy_T2SN17Options__optval_t); } static void _fx_free_R18Options__options_t(struct _fx_R18Options__options_t* dst) { _fx_free_LS(&dst->app_args); fx_free_str(&dst->app_filename); fx_free_str(&dst->build_dir); fx_free_str(&dst->build_rootdir); fx_free_str(&dst->cflags); fx_free_str(&dst->clibs); fx_free_str(&dst->filename); _fx_free_LS(&dst->include_path); _fx_free_LT2SN17Options__optval_t(&dst->defines); fx_free_str(&dst->output_name); } static void _fx_copy_R18Options__options_t(struct _fx_R18Options__options_t* src, struct _fx_R18Options__options_t* dst) { FX_COPY_PTR(src->app_args, &dst->app_args); fx_copy_str(&src->app_filename, &dst->app_filename); dst->arch64 = src->arch64; dst->force_rebuild = src->force_rebuild; fx_copy_str(&src->build_dir, &dst->build_dir); fx_copy_str(&src->build_rootdir, &dst->build_rootdir); fx_copy_str(&src->cflags, &dst->cflags); fx_copy_str(&src->clibs, &dst->clibs); dst->compile_by_cpp = src->compile_by_cpp; fx_copy_str(&src->filename, &dst->filename); dst->gen_c = src->gen_c; FX_COPY_PTR(src->include_path, &dst->include_path); dst->debug = src->debug; FX_COPY_PTR(src->defines, &dst->defines); dst->optim_iters = src->optim_iters; dst->inline_thresh = src->inline_thresh; dst->enable_openmp = src->enable_openmp; dst->relax = src->relax; dst->use_preamble = src->use_preamble; dst->make_app = src->make_app; dst->optimize_level = src->optimize_level; fx_copy_str(&src->output_name, &dst->output_name); dst->print_ast0 = src->print_ast0; dst->print_ast = src->print_ast; dst->print_k0 = src->print_k0; dst->print_k = src->print_k; dst->print_tokens = src->print_tokens; dst->run_app = src->run_app; dst->verbose = src->verbose; dst->W_unused = src->W_unused; } static void _fx_make_R18Options__options_t( struct _fx_LS_data_t* r_app_args, fx_str_t* r_app_filename, bool r_arch64, bool r_force_rebuild, fx_str_t* r_build_dir, fx_str_t* r_build_rootdir, fx_str_t* r_cflags, fx_str_t* r_clibs, bool r_compile_by_cpp, fx_str_t* r_filename, bool r_gen_c, struct _fx_LS_data_t* r_include_path, bool r_debug, struct _fx_LT2SN17Options__optval_t_data_t* r_defines, int_ r_optim_iters, int_ r_inline_thresh, bool r_enable_openmp, bool r_relax, bool r_use_preamble, bool r_make_app, int_ r_optimize_level, fx_str_t* r_output_name, bool r_print_ast0, bool r_print_ast, bool r_print_k0, bool r_print_k, bool r_print_tokens, bool r_run_app, bool r_verbose, bool r_W_unused, struct _fx_R18Options__options_t* fx_result) { FX_COPY_PTR(r_app_args, &fx_result->app_args); fx_copy_str(r_app_filename, &fx_result->app_filename); fx_result->arch64 = r_arch64; fx_result->force_rebuild = r_force_rebuild; fx_copy_str(r_build_dir, &fx_result->build_dir); fx_copy_str(r_build_rootdir, &fx_result->build_rootdir); fx_copy_str(r_cflags, &fx_result->cflags); fx_copy_str(r_clibs, &fx_result->clibs); fx_result->compile_by_cpp = r_compile_by_cpp; fx_copy_str(r_filename, &fx_result->filename); fx_result->gen_c = r_gen_c; FX_COPY_PTR(r_include_path, &fx_result->include_path); fx_result->debug = r_debug; FX_COPY_PTR(r_defines, &fx_result->defines); fx_result->optim_iters = r_optim_iters; fx_result->inline_thresh = r_inline_thresh; fx_result->enable_openmp = r_enable_openmp; fx_result->relax = r_relax; fx_result->use_preamble = r_use_preamble; fx_result->make_app = r_make_app; fx_result->optimize_level = r_optimize_level; fx_copy_str(r_output_name, &fx_result->output_name); fx_result->print_ast0 = r_print_ast0; fx_result->print_ast = r_print_ast; fx_result->print_k0 = r_print_k0; fx_result->print_k = r_print_k; 
fx_result->print_tokens = r_print_tokens; fx_result->run_app = r_run_app; fx_result->verbose = r_verbose; fx_result->W_unused = r_W_unused; } static void _fx_free_T2Ta2iS(struct _fx_T2Ta2iS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2Ta2iS(struct _fx_T2Ta2iS* src, struct _fx_T2Ta2iS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2Ta2iS(struct _fx_Ta2i* t0, fx_str_t* t1, struct _fx_T2Ta2iS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); } static int _fx_cons_LN12Ast__scope_t( struct _fx_N12Ast__scope_t* hd, struct _fx_LN12Ast__scope_t_data_t* tl, bool addref_tl, struct _fx_LN12Ast__scope_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN12Ast__scope_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* dst) { fx_free_list_simple(&dst->val_flag_global); } static void _fx_copy_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* src, struct _fx_R16Ast__val_flags_t* dst) { dst->val_flag_arg = src->val_flag_arg; dst->val_flag_mutable = src->val_flag_mutable; dst->val_flag_temp = src->val_flag_temp; dst->val_flag_tempref = src->val_flag_tempref; dst->val_flag_private = src->val_flag_private; dst->val_flag_subarray = src->val_flag_subarray; dst->val_flag_instance = src->val_flag_instance; dst->val_flag_method = src->val_flag_method; dst->val_flag_ctor = src->val_flag_ctor; FX_COPY_PTR(src->val_flag_global, &dst->val_flag_global); } static void _fx_make_R16Ast__val_flags_t( bool r_val_flag_arg, bool r_val_flag_mutable, bool r_val_flag_temp, bool r_val_flag_tempref, bool r_val_flag_private, bool r_val_flag_subarray, bool r_val_flag_instance, struct _fx_T2R9Ast__id_ti* r_val_flag_method, int_ r_val_flag_ctor, struct _fx_LN12Ast__scope_t_data_t* r_val_flag_global, struct _fx_R16Ast__val_flags_t* fx_result) { fx_result->val_flag_arg = r_val_flag_arg; fx_result->val_flag_mutable = r_val_flag_mutable; fx_result->val_flag_temp = r_val_flag_temp; fx_result->val_flag_tempref = r_val_flag_tempref; fx_result->val_flag_private = r_val_flag_private; fx_result->val_flag_subarray = r_val_flag_subarray; fx_result->val_flag_instance = r_val_flag_instance; fx_result->val_flag_method = *r_val_flag_method; fx_result->val_flag_ctor = r_val_flag_ctor; FX_COPY_PTR(r_val_flag_global, &fx_result->val_flag_global); } static void _fx_free_R17C_form__cdefval_t(struct _fx_R17C_form__cdefval_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->cv_typ); fx_free_str(&dst->cv_cname); _fx_free_R16Ast__val_flags_t(&dst->cv_flags); } static void _fx_copy_R17C_form__cdefval_t(struct _fx_R17C_form__cdefval_t* src, struct _fx_R17C_form__cdefval_t* dst) { dst->cv_name = src->cv_name; FX_COPY_PTR(src->cv_typ, &dst->cv_typ); fx_copy_str(&src->cv_cname, &dst->cv_cname); _fx_copy_R16Ast__val_flags_t(&src->cv_flags, &dst->cv_flags); dst->cv_loc = src->cv_loc; } static void _fx_make_R17C_form__cdefval_t( struct _fx_R9Ast__id_t* r_cv_name, struct _fx_N14C_form__ctyp_t_data_t* r_cv_typ, fx_str_t* r_cv_cname, struct _fx_R16Ast__val_flags_t* r_cv_flags, struct _fx_R10Ast__loc_t* r_cv_loc, struct _fx_R17C_form__cdefval_t* fx_result) { fx_result->cv_name = *r_cv_name; FX_COPY_PTR(r_cv_typ, &fx_result->cv_typ); fx_copy_str(r_cv_cname, &fx_result->cv_cname); _fx_copy_R16Ast__val_flags_t(r_cv_flags, &fx_result->cv_flags); fx_result->cv_loc = *r_cv_loc; } static void _fx_free_R19C_form__cdeflabel_t(struct _fx_R19C_form__cdeflabel_t* dst) { fx_free_str(&dst->cl_cname); } static void _fx_copy_R19C_form__cdeflabel_t(struct _fx_R19C_form__cdeflabel_t* 
src, struct _fx_R19C_form__cdeflabel_t* dst) { dst->cl_name = src->cl_name; fx_copy_str(&src->cl_cname, &dst->cl_cname); dst->cl_loc = src->cl_loc; } static void _fx_make_R19C_form__cdeflabel_t( struct _fx_R9Ast__id_t* r_cl_name, fx_str_t* r_cl_cname, struct _fx_R10Ast__loc_t* r_cl_loc, struct _fx_R19C_form__cdeflabel_t* fx_result) { fx_result->cl_name = *r_cl_name; fx_copy_str(r_cl_cname, &fx_result->cl_cname); fx_result->cl_loc = *r_cl_loc; } static void _fx_free_T2R9Ast__id_tN14C_form__ctyp_t(struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* src, struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN14C_form__ctyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_free_T2R9Ast__id_tN14C_form__ctyp_t); } static int _fx_cons_LT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* hd, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t); } static void _fx_free_R23C_form__cdefinterface_t(struct _fx_R23C_form__cdefinterface_t* dst) { fx_free_str(&dst->ci_cname); _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->ci_all_methods); fx_free_list_simple(&dst->ci_scope); } static void _fx_copy_R23C_form__cdefinterface_t( struct _fx_R23C_form__cdefinterface_t* src, struct _fx_R23C_form__cdefinterface_t* dst) { dst->ci_name = src->ci_name; fx_copy_str(&src->ci_cname, &dst->ci_cname); dst->ci_id = src->ci_id; dst->ci_vtbl = src->ci_vtbl; dst->ci_base = src->ci_base; FX_COPY_PTR(src->ci_all_methods, &dst->ci_all_methods); FX_COPY_PTR(src->ci_scope, &dst->ci_scope); dst->ci_loc = src->ci_loc; } static void _fx_make_R23C_form__cdefinterface_t( struct _fx_R9Ast__id_t* r_ci_name, fx_str_t* r_ci_cname, struct _fx_R9Ast__id_t* r_ci_id, struct _fx_R9Ast__id_t* r_ci_vtbl, struct _fx_R9Ast__id_t* r_ci_base, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* r_ci_all_methods, struct _fx_LN12Ast__scope_t_data_t* r_ci_scope, struct _fx_R10Ast__loc_t* r_ci_loc, struct _fx_R23C_form__cdefinterface_t* fx_result) { fx_result->ci_name = *r_ci_name; fx_copy_str(r_ci_cname, &fx_result->ci_cname); fx_result->ci_id = *r_ci_id; fx_result->ci_vtbl = *r_ci_vtbl; fx_result->ci_base = *r_ci_base; FX_COPY_PTR(r_ci_all_methods, &fx_result->ci_all_methods); FX_COPY_PTR(r_ci_scope, &fx_result->ci_scope); fx_result->ci_loc = *r_ci_loc; } static void _fx_free_rR23C_form__cdefinterface_t(struct _fx_rR23C_form__cdefinterface_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_free_R23C_form__cdefinterface_t); } static int _fx_make_rR23C_form__cdefinterface_t( struct _fx_R23C_form__cdefinterface_t* arg, struct _fx_rR23C_form__cdefinterface_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_copy_R23C_form__cdefinterface_t); } static void _fx_free_LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN15C_form__cstmt_t, _fx_free_N15C_form__cstmt_t); } 
static int _fx_cons_LN15C_form__cstmt_t( struct _fx_N15C_form__cstmt_t_data_t* hd, struct _fx_LN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN15C_form__cstmt_t, FX_COPY_PTR); } static int _fx_cons_LN19C_form__carg_attr_t( struct _fx_N19C_form__carg_attr_t* hd, struct _fx_LN19C_form__carg_attr_t_data_t* tl, bool addref_tl, struct _fx_LN19C_form__carg_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN19C_form__carg_attr_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); fx_free_list_simple(&dst->t2); } static void _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* src, struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); } static void _fx_make_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_LN19C_form__carg_attr_t_data_t* t2, struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); } static void _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t, _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t); } static int _fx_cons_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* hd, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl, bool addref_tl, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t, _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t); } static void _fx_free_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* dst) { fx_free_str(&dst->cf_cname); _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&dst->cf_args); _fx_free_N14C_form__ctyp_t(&dst->cf_rt); _fx_free_LN15C_form__cstmt_t(&dst->cf_body); fx_free_list_simple(&dst->cf_scope); } static void _fx_copy_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* src, struct _fx_R17C_form__cdeffun_t* dst) { dst->cf_name = src->cf_name; fx_copy_str(&src->cf_cname, &dst->cf_cname); FX_COPY_PTR(src->cf_args, &dst->cf_args); FX_COPY_PTR(src->cf_rt, &dst->cf_rt); FX_COPY_PTR(src->cf_body, &dst->cf_body); dst->cf_flags = src->cf_flags; FX_COPY_PTR(src->cf_scope, &dst->cf_scope); dst->cf_loc = src->cf_loc; } static void _fx_make_R17C_form__cdeffun_t( struct _fx_R9Ast__id_t* r_cf_name, fx_str_t* r_cf_cname, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* r_cf_args, struct _fx_N14C_form__ctyp_t_data_t* r_cf_rt, struct _fx_LN15C_form__cstmt_t_data_t* r_cf_body, struct _fx_R16Ast__fun_flags_t* r_cf_flags, struct _fx_LN12Ast__scope_t_data_t* r_cf_scope, struct _fx_R10Ast__loc_t* r_cf_loc, struct _fx_R17C_form__cdeffun_t* fx_result) { fx_result->cf_name = *r_cf_name; fx_copy_str(r_cf_cname, &fx_result->cf_cname); FX_COPY_PTR(r_cf_args, &fx_result->cf_args); FX_COPY_PTR(r_cf_rt, &fx_result->cf_rt); 
FX_COPY_PTR(r_cf_body, &fx_result->cf_body); fx_result->cf_flags = *r_cf_flags; FX_COPY_PTR(r_cf_scope, &fx_result->cf_scope); fx_result->cf_loc = *r_cf_loc; } static void _fx_free_rR17C_form__cdeffun_t(struct _fx_rR17C_form__cdeffun_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_free_R17C_form__cdeffun_t); } static int _fx_make_rR17C_form__cdeffun_t( struct _fx_R17C_form__cdeffun_t* arg, struct _fx_rR17C_form__cdeffun_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_copy_R17C_form__cdeffun_t); } static int _fx_cons_LR9Ast__id_t( struct _fx_R9Ast__id_t* hd, struct _fx_LR9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LR9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* dst) { fx_free_list_simple(&dst->ctp_make); } static void _fx_copy_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* src, struct _fx_R17C_form__ctprops_t* dst) { dst->ctp_scalar = src->ctp_scalar; dst->ctp_complex = src->ctp_complex; dst->ctp_ptr = src->ctp_ptr; dst->ctp_pass_by_ref = src->ctp_pass_by_ref; FX_COPY_PTR(src->ctp_make, &dst->ctp_make); dst->ctp_free = src->ctp_free; dst->ctp_copy = src->ctp_copy; } static void _fx_make_R17C_form__ctprops_t( bool r_ctp_scalar, bool r_ctp_complex, bool r_ctp_ptr, bool r_ctp_pass_by_ref, struct _fx_LR9Ast__id_t_data_t* r_ctp_make, struct _fx_Ta2R9Ast__id_t* r_ctp_free, struct _fx_Ta2R9Ast__id_t* r_ctp_copy, struct _fx_R17C_form__ctprops_t* fx_result) { fx_result->ctp_scalar = r_ctp_scalar; fx_result->ctp_complex = r_ctp_complex; fx_result->ctp_ptr = r_ctp_ptr; fx_result->ctp_pass_by_ref = r_ctp_pass_by_ref; FX_COPY_PTR(r_ctp_make, &fx_result->ctp_make); fx_result->ctp_free = *r_ctp_free; fx_result->ctp_copy = *r_ctp_copy; } static void _fx_free_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->ct_typ); fx_free_str(&dst->ct_cname); _fx_free_R17C_form__ctprops_t(&dst->ct_props); fx_free_list_simple(&dst->ct_ifaces); fx_free_list_simple(&dst->ct_scope); } static void _fx_copy_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* src, struct _fx_R17C_form__cdeftyp_t* dst) { dst->ct_name = src->ct_name; FX_COPY_PTR(src->ct_typ, &dst->ct_typ); fx_copy_str(&src->ct_cname, &dst->ct_cname); _fx_copy_R17C_form__ctprops_t(&src->ct_props, &dst->ct_props); dst->ct_data_start = src->ct_data_start; dst->ct_enum = src->ct_enum; FX_COPY_PTR(src->ct_ifaces, &dst->ct_ifaces); dst->ct_ifaces_id = src->ct_ifaces_id; FX_COPY_PTR(src->ct_scope, &dst->ct_scope); dst->ct_loc = src->ct_loc; } static void _fx_make_R17C_form__cdeftyp_t( struct _fx_R9Ast__id_t* r_ct_name, struct _fx_N14C_form__ctyp_t_data_t* r_ct_typ, fx_str_t* r_ct_cname, struct _fx_R17C_form__ctprops_t* r_ct_props, int_ r_ct_data_start, struct _fx_R9Ast__id_t* r_ct_enum, struct _fx_LR9Ast__id_t_data_t* r_ct_ifaces, struct _fx_R9Ast__id_t* r_ct_ifaces_id, struct _fx_LN12Ast__scope_t_data_t* r_ct_scope, struct _fx_R10Ast__loc_t* r_ct_loc, struct _fx_R17C_form__cdeftyp_t* fx_result) { fx_result->ct_name = *r_ct_name; FX_COPY_PTR(r_ct_typ, &fx_result->ct_typ); fx_copy_str(r_ct_cname, &fx_result->ct_cname); _fx_copy_R17C_form__ctprops_t(r_ct_props, &fx_result->ct_props); fx_result->ct_data_start = r_ct_data_start; fx_result->ct_enum = *r_ct_enum; FX_COPY_PTR(r_ct_ifaces, &fx_result->ct_ifaces); fx_result->ct_ifaces_id = *r_ct_ifaces_id; FX_COPY_PTR(r_ct_scope, &fx_result->ct_scope); fx_result->ct_loc = *r_ct_loc; } static void 
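/* The _fx_rR* "reference" wrappers put a record into a shared heap cell:
   FX_MAKE_REF_IMPL allocates the cell and copies the record in, while
   FX_FREE_REF_IMPL decrements its count and, presumably, runs the record
   destructor and releases the cell once the last reference is gone. */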
_fx_free_rR17C_form__cdeftyp_t(struct _fx_rR17C_form__cdeftyp_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_free_R17C_form__cdeftyp_t); } static int _fx_make_rR17C_form__cdeftyp_t( struct _fx_R17C_form__cdeftyp_t* arg, struct _fx_rR17C_form__cdeftyp_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_copy_R17C_form__cdeftyp_t); } static void _fx_free_Nt6option1N14C_form__cexp_t(struct _fx_Nt6option1N14C_form__cexp_t* dst) { switch (dst->tag) { case 2: _fx_free_N14C_form__cexp_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14C_form__cexp_t( struct _fx_Nt6option1N14C_form__cexp_t* src, struct _fx_Nt6option1N14C_form__cexp_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: FX_COPY_PTR(src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t(struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst) { _fx_free_Nt6option1N14C_form__cexp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* src, struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst) { dst->t0 = src->t0; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_R9Ast__id_t* t0, struct _fx_Nt6option1N14C_form__cexp_t* t1, struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* fx_result) { fx_result->t0 = *t0; _fx_copy_Nt6option1N14C_form__cexp_t(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t); } static int _fx_cons_LT2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* hd, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t); } static void _fx_free_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* dst) { _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&dst->cenum_members); fx_free_str(&dst->cenum_cname); fx_free_list_simple(&dst->cenum_scope); } static void _fx_copy_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* src, struct _fx_R18C_form__cdefenum_t* dst) { dst->cenum_name = src->cenum_name; FX_COPY_PTR(src->cenum_members, &dst->cenum_members); fx_copy_str(&src->cenum_cname, &dst->cenum_cname); FX_COPY_PTR(src->cenum_scope, &dst->cenum_scope); dst->cenum_loc = src->cenum_loc; } static void _fx_make_R18C_form__cdefenum_t( struct _fx_R9Ast__id_t* r_cenum_name, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* r_cenum_members, fx_str_t* r_cenum_cname, struct _fx_LN12Ast__scope_t_data_t* r_cenum_scope, struct _fx_R10Ast__loc_t* r_cenum_loc, struct _fx_R18C_form__cdefenum_t* fx_result) { fx_result->cenum_name = *r_cenum_name; FX_COPY_PTR(r_cenum_members, &fx_result->cenum_members); fx_copy_str(r_cenum_cname, &fx_result->cenum_cname); FX_COPY_PTR(r_cenum_scope, &fx_result->cenum_scope); fx_result->cenum_loc = *r_cenum_loc; } static void _fx_free_rR18C_form__cdefenum_t(struct _fx_rR18C_form__cdefenum_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_free_R18C_form__cdefenum_t); } static int 
_fx_make_rR18C_form__cdefenum_t( struct _fx_R18C_form__cdefenum_t* arg, struct _fx_rR18C_form__cdefenum_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_copy_R18C_form__cdefenum_t); } static void _fx_free_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* dst) { fx_free_str(&dst->cm_cname); fx_free_list_simple(&dst->cm_args); _fx_free_LN15C_form__cstmt_t(&dst->cm_body); fx_free_list_simple(&dst->cm_scope); } static void _fx_copy_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* src, struct _fx_R19C_form__cdefmacro_t* dst) { dst->cm_name = src->cm_name; fx_copy_str(&src->cm_cname, &dst->cm_cname); FX_COPY_PTR(src->cm_args, &dst->cm_args); FX_COPY_PTR(src->cm_body, &dst->cm_body); FX_COPY_PTR(src->cm_scope, &dst->cm_scope); dst->cm_loc = src->cm_loc; } static void _fx_make_R19C_form__cdefmacro_t( struct _fx_R9Ast__id_t* r_cm_name, fx_str_t* r_cm_cname, struct _fx_LR9Ast__id_t_data_t* r_cm_args, struct _fx_LN15C_form__cstmt_t_data_t* r_cm_body, struct _fx_LN12Ast__scope_t_data_t* r_cm_scope, struct _fx_R10Ast__loc_t* r_cm_loc, struct _fx_R19C_form__cdefmacro_t* fx_result) { fx_result->cm_name = *r_cm_name; fx_copy_str(r_cm_cname, &fx_result->cm_cname); FX_COPY_PTR(r_cm_args, &fx_result->cm_args); FX_COPY_PTR(r_cm_body, &fx_result->cm_body); FX_COPY_PTR(r_cm_scope, &fx_result->cm_scope); fx_result->cm_loc = *r_cm_loc; } static void _fx_free_rR19C_form__cdefmacro_t(struct _fx_rR19C_form__cdefmacro_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_free_R19C_form__cdefmacro_t); } static int _fx_make_rR19C_form__cdefmacro_t( struct _fx_R19C_form__cdefmacro_t* arg, struct _fx_rR19C_form__cdefmacro_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_copy_R19C_form__cdefmacro_t); } static void _fx_free_R17C_form__cdefexn_t(struct _fx_R17C_form__cdefexn_t* dst) { fx_free_str(&dst->cexn_cname); fx_free_str(&dst->cexn_base_cname); _fx_free_N14C_form__ctyp_t(&dst->cexn_typ); fx_free_list_simple(&dst->cexn_scope); } static void _fx_copy_R17C_form__cdefexn_t(struct _fx_R17C_form__cdefexn_t* src, struct _fx_R17C_form__cdefexn_t* dst) { dst->cexn_name = src->cexn_name; fx_copy_str(&src->cexn_cname, &dst->cexn_cname); fx_copy_str(&src->cexn_base_cname, &dst->cexn_base_cname); FX_COPY_PTR(src->cexn_typ, &dst->cexn_typ); dst->cexn_std = src->cexn_std; dst->cexn_tag = src->cexn_tag; dst->cexn_data = src->cexn_data; dst->cexn_info = src->cexn_info; dst->cexn_make = src->cexn_make; FX_COPY_PTR(src->cexn_scope, &dst->cexn_scope); dst->cexn_loc = src->cexn_loc; } static void _fx_make_R17C_form__cdefexn_t( struct _fx_R9Ast__id_t* r_cexn_name, fx_str_t* r_cexn_cname, fx_str_t* r_cexn_base_cname, struct _fx_N14C_form__ctyp_t_data_t* r_cexn_typ, bool r_cexn_std, struct _fx_R9Ast__id_t* r_cexn_tag, struct _fx_R9Ast__id_t* r_cexn_data, struct _fx_R9Ast__id_t* r_cexn_info, struct _fx_R9Ast__id_t* r_cexn_make, struct _fx_LN12Ast__scope_t_data_t* r_cexn_scope, struct _fx_R10Ast__loc_t* r_cexn_loc, struct _fx_R17C_form__cdefexn_t* fx_result) { fx_result->cexn_name = *r_cexn_name; fx_copy_str(r_cexn_cname, &fx_result->cexn_cname); fx_copy_str(r_cexn_base_cname, &fx_result->cexn_base_cname); FX_COPY_PTR(r_cexn_typ, &fx_result->cexn_typ); fx_result->cexn_std = r_cexn_std; fx_result->cexn_tag = *r_cexn_tag; fx_result->cexn_data = *r_cexn_data; fx_result->cexn_info = *r_cexn_info; fx_result->cexn_make = *r_cexn_make; FX_COPY_PTR(r_cexn_scope, &fx_result->cexn_scope); fx_result->cexn_loc = *r_cexn_loc; } static void _fx_free_rR17C_form__cdefexn_t(struct 
_fx_rR17C_form__cdefexn_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdefexn_t, _fx_free_R17C_form__cdefexn_t); } static int _fx_make_rR17C_form__cdefexn_t( struct _fx_R17C_form__cdefexn_t* arg, struct _fx_rR17C_form__cdefexn_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdefexn_t, _fx_copy_R17C_form__cdefexn_t); } static void _fx_free_N15C_form__cinfo_t(struct _fx_N15C_form__cinfo_t* dst) { switch (dst->tag) { case 2: _fx_free_R17C_form__cdefval_t(&dst->u.CVal); break; case 3: _fx_free_rR17C_form__cdeffun_t(&dst->u.CFun); break; case 4: _fx_free_rR17C_form__cdeftyp_t(&dst->u.CTyp); break; case 5: _fx_free_rR17C_form__cdefexn_t(&dst->u.CExn); break; case 6: _fx_free_rR23C_form__cdefinterface_t(&dst->u.CInterface); break; case 7: _fx_free_rR18C_form__cdefenum_t(&dst->u.CEnum); break; case 8: _fx_free_R19C_form__cdeflabel_t(&dst->u.CLabel); break; case 9: _fx_free_rR19C_form__cdefmacro_t(&dst->u.CMacro); break; default: ; } dst->tag = 0; } static void _fx_copy_N15C_form__cinfo_t(struct _fx_N15C_form__cinfo_t* src, struct _fx_N15C_form__cinfo_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_R17C_form__cdefval_t(&src->u.CVal, &dst->u.CVal); break; case 3: FX_COPY_PTR(src->u.CFun, &dst->u.CFun); break; case 4: FX_COPY_PTR(src->u.CTyp, &dst->u.CTyp); break; case 5: FX_COPY_PTR(src->u.CExn, &dst->u.CExn); break; case 6: FX_COPY_PTR(src->u.CInterface, &dst->u.CInterface); break; case 7: FX_COPY_PTR(src->u.CEnum, &dst->u.CEnum); break; case 8: _fx_copy_R19C_form__cdeflabel_t(&src->u.CLabel, &dst->u.CLabel); break; case 9: FX_COPY_PTR(src->u.CMacro, &dst->u.CMacro); break; default: dst->u = src->u; } } static void _fx_free_T2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t( struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* src, struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN14K_form__ktyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_free_T2R9Ast__id_tN14K_form__ktyp_t); } static int _fx_cons_LT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* hd, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t); } static void _fx_free_LN14K_form__ktyp_t(struct _fx_LN14K_form__ktyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14K_form__ktyp_t, _fx_free_N14K_form__ktyp_t); } static int _fx_cons_LN14K_form__ktyp_t( struct _fx_N14K_form__ktyp_t_data_t* hd, struct _fx_LN14K_form__ktyp_t_data_t* tl, bool addref_tl, struct _fx_LN14K_form__ktyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14K_form__ktyp_t, FX_COPY_PTR); } static void _fx_free_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* src, struct _fx_T2R10Ast__loc_tS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2R10Ast__loc_tS(struct _fx_R10Ast__loc_t* t0, fx_str_t* t1, struct 
_fx_T2R10Ast__loc_tS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); } static void _fx_free_N11PP__pptok_t(struct _fx_N11PP__pptok_t* dst) { switch (dst->tag) { case 1: fx_free_str(&dst->u.PPString); break; default: ; } dst->tag = 0; } static void _fx_copy_N11PP__pptok_t(struct _fx_N11PP__pptok_t* src, struct _fx_N11PP__pptok_t* dst) { dst->tag = src->tag; switch (src->tag) { case 1: fx_copy_str(&src->u.PPString, &dst->u.PPString); break; default: dst->u = src->u; } } static void _fx_free_T2N11PP__pptok_ti(struct _fx_T2N11PP__pptok_ti* dst) { _fx_free_N11PP__pptok_t(&dst->t0); } static void _fx_copy_T2N11PP__pptok_ti(struct _fx_T2N11PP__pptok_ti* src, struct _fx_T2N11PP__pptok_ti* dst) { _fx_copy_N11PP__pptok_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N11PP__pptok_ti(struct _fx_N11PP__pptok_t* t0, int_ t1, struct _fx_T2N11PP__pptok_ti* fx_result) { _fx_copy_N11PP__pptok_t(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_R11PP__state_t(struct _fx_R11PP__state_t* dst) { fx_free_arr(&dst->q); fx_free_arr(&dst->stack); fx_free_arr(&dst->pp_stack); } static void _fx_copy_R11PP__state_t(struct _fx_R11PP__state_t* src, struct _fx_R11PP__state_t* dst) { dst->space = src->space; dst->left = src->left; dst->right = src->right; dst->top = src->top; dst->bottom = src->bottom; dst->lefttotal = src->lefttotal; dst->righttotal = src->righttotal; fx_copy_arr(&src->q, &dst->q); fx_copy_arr(&src->stack, &dst->stack); fx_copy_arr(&src->pp_stack, &dst->pp_stack); dst->pp_top = src->pp_top; dst->emptystack = src->emptystack; } static void _fx_make_R11PP__state_t( int_ r_space, int_ r_left, int_ r_right, int_ r_top, int_ r_bottom, int_ r_lefttotal, int_ r_righttotal, fx_arr_t* r_q, fx_arr_t* r_stack, fx_arr_t* r_pp_stack, int_ r_pp_top, bool r_emptystack, struct _fx_R11PP__state_t* fx_result) { fx_result->space = r_space; fx_result->left = r_left; fx_result->right = r_right; fx_result->top = r_top; fx_result->bottom = r_bottom; fx_result->lefttotal = r_lefttotal; fx_result->righttotal = r_righttotal; fx_copy_arr(r_q, &fx_result->q); fx_copy_arr(r_stack, &fx_result->stack); fx_copy_arr(r_pp_stack, &fx_result->pp_stack); fx_result->pp_top = r_pp_top; fx_result->emptystack = r_emptystack; } static void _fx_free_rR11PP__state_t(struct _fx_rR11PP__state_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR11PP__state_t, _fx_free_R11PP__state_t); } static int _fx_make_rR11PP__state_t(struct _fx_R11PP__state_t* arg, struct _fx_rR11PP__state_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR11PP__state_t, _fx_copy_R11PP__state_t); } static void _fx_free_R5PP__t(struct _fx_R5PP__t* dst) { fx_free_fp(&dst->print_f); fx_free_fp(&dst->get_f); _fx_free_rR11PP__state_t(&dst->r); } static void _fx_copy_R5PP__t(struct _fx_R5PP__t* src, struct _fx_R5PP__t* dst) { dst->margin = src->margin; dst->default_indent = src->default_indent; FX_COPY_FP(&src->print_f, &dst->print_f); FX_COPY_FP(&src->get_f, &dst->get_f); FX_COPY_PTR(src->r, &dst->r); } static void _fx_make_R5PP__t( int_ r_margin, int_ r_default_indent, struct _fx_FPv1S* r_print_f, struct _fx_FPLS0* r_get_f, struct _fx_rR11PP__state_t_data_t* r_r, struct _fx_R5PP__t* fx_result) { fx_result->margin = r_margin; fx_result->default_indent = r_default_indent; FX_COPY_FP(r_print_f, &fx_result->print_f); FX_COPY_FP(r_get_f, &fx_result->get_f); FX_COPY_PTR(r_r, &fx_result->r); } static void _fx_free_N14K_form__klit_t(struct _fx_N14K_form__klit_t* dst) { switch (dst->tag) { case 5: fx_free_str(&dst->u.KLitString); break; case 8: 
_fx_free_N14K_form__ktyp_t(&dst->u.KLitNil); break; default: ; } dst->tag = 0; } static void _fx_copy_N14K_form__klit_t(struct _fx_N14K_form__klit_t* src, struct _fx_N14K_form__klit_t* dst) { dst->tag = src->tag; switch (src->tag) { case 5: fx_copy_str(&src->u.KLitString, &dst->u.KLitString); break; case 8: FX_COPY_PTR(src->u.KLitNil, &dst->u.KLitNil); break; default: dst->u = src->u; } } static void _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { _fx_free_LN14K_form__ktyp_t(&dst->t0); _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* src, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_LN14K_form__ktyp_t_data_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* src, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* src, struct _fx_T2iN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14K_form__ktyp_t( int_ t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2iN14K_form__ktyp_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 11: _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(&(*dst)->u.KTypFun); break; case 12: _fx_free_LN14K_form__ktyp_t(&(*dst)->u.KTypTuple); break; case 13: _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(&(*dst)->u.KTypRecord); break; case 15: _fx_free_T2iN14K_form__ktyp_t(&(*dst)->u.KTypArray); break; case 16: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypVector); break; case 17: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypList); break; case 18: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRef); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_Nt6option1N14C_form__ctyp_t(struct _fx_Nt6option1N14C_form__ctyp_t* dst) { switch (dst->tag) { case 2: _fx_free_N14C_form__ctyp_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14C_form__ctyp_t( struct _fx_Nt6option1N14C_form__ctyp_t* src, struct _fx_Nt6option1N14C_form__ctyp_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: FX_COPY_PTR(src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* dst) { 
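/* (string, location) pair: judging by the tag switches further down, this is
   the payload of CComment, CExpCCode, CMacroInclude and CMacroPragma; only
   the string component owns heap memory. */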
fx_free_str(&dst->t0); } static void _fx_copy_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* src, struct _fx_T2SR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SR10Ast__loc_t(fx_str_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2SR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* src, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_Nt6option1R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__ctyp_t, _fx_free_N14C_form__ctyp_t); } static int _fx_cons_LN14C_form__ctyp_t( struct _fx_N14C_form__ctyp_t_data_t* hd, struct _fx_LN14C_form__ctyp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__ctyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__ctyp_t, FX_COPY_PTR); } static void _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { _fx_free_LN14C_form__ctyp_t(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* src, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_LN14C_form__ctyp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static int _fx_cons_LN19C_form__ctyp_attr_t( struct _fx_N19C_form__ctyp_attr_t* hd, struct _fx_LN19C_form__ctyp_attr_t_data_t* tl, bool addref_tl, struct _fx_LN19C_form__ctyp_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN19C_form__ctyp_attr_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { fx_free_list_simple(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* src, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_LN19C_form__ctyp_attr_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* src, struct _fx_T2iN14C_form__ctyp_t* dst) { dst->t0 = src->t0; 
FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14C_form__ctyp_t( int_ t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2iN14C_form__ctyp_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 13: _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypStruct); break; case 14: _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypUnion); break; case 15: _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(&(*dst)->u.CTypFunRawPtr); break; case 16: _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawPtr); break; case 17: _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawArray); break; case 18: _fx_free_T2iN14C_form__ctyp_t(&(*dst)->u.CTypArray); break; case 19: _fx_free_N14C_form__ctyp_t(&(*dst)->u.CTypVector); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); } static void _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__klit_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_copy_N14K_form__klit_t(&src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14K_form__klit_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { _fx_copy_N14K_form__klit_t(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { 
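/* Payload of CExpBinary: (operator, lhs, rhs, (result type, loc)). The
   operator t0 is a tag-only value with no heap data, so only the operand
   subtrees and the type/location pair are released. */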
_fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N17C_form__cbinary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N16C_form__cunary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void 
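/* This (expression, target C type, loc) triple is the payload of CExpCast
   (tag 7 in _fx_free_N14C_form__cexp_t below). */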
_fx_copy_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_LN14C_form__cexp_t(struct _fx_LN14C_form__cexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__cexp_t, _fx_free_N14C_form__cexp_t); } static int _fx_cons_LN14C_form__cexp_t( struct _fx_N14C_form__cexp_t_data_t* hd, struct _fx_LN14C_form__cexp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__cexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__cexp_t, FX_COPY_PTR); } static void _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); 
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 1: _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpIdent); break; case 2: _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpLit); break; case 3: _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( &(*dst)->u.CExpBinary); break; case 4: _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpUnary); break; case 5: _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpMem); break; case 6: _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpArrow); break; case 7: _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCast); break; case 8: _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTernary); break; case 9: _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCall); break; case 10: _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpInit); break; case 11: _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTyp); break; case 12: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CExpCCode); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14C_form__cexp_t(&dst->t0); } static void _fx_copy_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14C_form__cexp_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_Nt6option1N14C_form__cexp_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14C_form__cexp_t(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LN15C_form__cstmt_t(&dst->t0); } static void _fx_copy_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void 
_fx_make_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LN15C_form__cstmt_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* src, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_R9Ast__id_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); _fx_free_N15C_form__cstmt_t(&dst->t2); } static void _fx_copy_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_N15C_form__cstmt_t_data_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14C_form__ctyp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); _fx_free_LN14C_form__cexp_t(&dst->t3); _fx_free_N15C_form__cstmt_t(&dst->t4); } static void _fx_copy_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14C_form__ctyp_t(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); FX_COPY_PTR(src->t3, &dst->t3); FX_COPY_PTR(src->t4, &dst->t4); dst->t5 = src->t5; } static void _fx_make_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_Nt6option1N14C_form__ctyp_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_LN14C_form__cexp_t_data_t* t3, struct _fx_N15C_form__cstmt_t_data_t* t4, struct _fx_R10Ast__loc_t* t5, struct 
_fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14C_form__ctyp_t(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); FX_COPY_PTR(t3, &fx_result->t3); FX_COPY_PTR(t4, &fx_result->t4); fx_result->t5 = *t5; } static void _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); } static void _fx_copy_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_N15C_form__cstmt_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static void 
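/* Payload of CStmtSwitch: the scrutinee expression, a list of
   (case-label expressions, case body) pairs, and the location. */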
_fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); } static void _fx_copy_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static void 
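/* Payload of CMacroIf: the (condition, body) branches, the #else body and
   the location. _fx_free_N15C_form__cstmt_t below releases a reference-
   counted statement node: FX_DECREF evidently returns the previous count, so
   a result of 1 means this was the last owner and the tag-selected payload
   plus the node itself are freed; the pointer is nulled either way. */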
_fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CComment); break; case 3: _fx_free_N14C_form__cexp_t(&(*dst)->u.CExp); break; case 6: _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtReturn); break; case 7: _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtBlock); break; case 8: _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(&(*dst)->u.CStmtSync); break; case 9: _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtIf); break; case 12: _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( &(*dst)->u.CStmtFor); break; case 13: _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtWhile); break; case 14: _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtDoWhile); break; case 15: _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtSwitch); break; case 16: _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CDefVal); break; case 17: _fx_free_rR17C_form__cdeffun_t(&(*dst)->u.CDefFun); break; case 18: _fx_free_rR17C_form__cdeftyp_t(&(*dst)->u.CDefTyp); break; case 21: _fx_free_rR18C_form__cdefenum_t(&(*dst)->u.CDefEnum); break; case 22: _fx_free_rR23C_form__cdefinterface_t(&(*dst)->u.CDefInterface); break; case 23: _fx_free_rR19C_form__cdefmacro_t(&(*dst)->u.CMacroDef); break; case 25: _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CMacroIf); break; case 26: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroInclude); break; case 27: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroPragma); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T3SiN13C_pp__assoc_t(struct _fx_T3SiN13C_pp__assoc_t* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T3SiN13C_pp__assoc_t(struct _fx_T3SiN13C_pp__assoc_t* src, struct _fx_T3SiN13C_pp__assoc_t* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; dst->t2 = src->t2; } static void _fx_make_T3SiN13C_pp__assoc_t( fx_str_t* t0, int_ t1, struct _fx_N13C_pp__assoc_t* t2, struct _fx_T3SiN13C_pp__assoc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = t1; fx_result->t2 = *t2; } _fx_Nt6option1R9Ast__id_t _fx_g10C_pp__None = { 1 }; 
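/* Global singletons for payload-free variants: a tag value alone identifies
   None, the CTypConst/CTypVolatile/CTypStatic attributes and the
   AssocLeft/AssocRight associativity marks, so one shared constant per
   variant suffices. */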
_fx_N19C_form__ctyp_attr_t _fx_g15C_pp__CTypConst = { 1 }; _fx_N19C_form__ctyp_attr_t _fx_g18C_pp__CTypVolatile = { 2 }; _fx_N19C_form__ctyp_attr_t _fx_g16C_pp__CTypStatic = { 3 }; _fx_N13C_pp__assoc_t _fx_g15C_pp__AssocLeft = { 1 }; _fx_N13C_pp__assoc_t _fx_g16C_pp__AssocRight = { 2 }; FX_EXTERN_C int _fx_M3AstFM6__eq__B2RM4id_tRM4id_t( struct _fx_R9Ast__id_t* a_0, struct _fx_R9Ast__id_t* b_0, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t( struct _fx_R9Ast__id_t* n_0, struct _fx_R10Ast__loc_t* loc_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM3strv2RM1tS(struct _fx_R5PP__t* pp_0, fx_str_t* s_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM5beginv1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM9ctyp2str_S2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t_0, struct _fx_R10Ast__loc_t* loc_0, fx_str_t* fx_result, void* fx_fv); static int _fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM11compile_errE2RM5loc_tS( struct _fx_R10Ast__loc_t* loc_0, fx_str_t* msg_0, fx_exn_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM5spacev1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); static int _fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( bool add_space_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM3cutv1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM3endv1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); static int _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( fx_str_t* prefix_0, struct _fx_Nt6option1R9Ast__id_t* n_opt_0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* elems_0, fx_str_t* suffix_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t( struct _fx_R9Ast__id_t* i_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_N15C_form__cinfo_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM7newlinev1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM8newlineuv1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_F3ordi1C(char_ c_0, int_* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F6stringS1i(int_ a, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM5splitLS3SCB( fx_str_t* s_0, char_ c_0, bool allow_empty_0, struct _fx_LS_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM7escapedS2SB(fx_str_t* s_0, bool quotes_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C bool _fx_M6StringFM8endswithB2SC(fx_str_t* s, char_ suffix, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM8klit2strS3N14K_form__klit_tBR10Ast__loc_t( struct _fx_N14K_form__klit_t* lit_0, bool cmode_0, struct _fx_R10Ast__loc_t* loc_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t( struct _fx_R5PP__t* pp_0, struct _fx_LN14C_form__cexp_t_data_t* el_0, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM5stripS1S(fx_str_t* s, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM6beginvv1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM6break0v1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); 
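/* The _fx_M...F... symbols are cross-module imports; judging by the
   mangling, _fx_M2PPF... are the pretty-printer engine primitives (PP.str,
   PP.begin, PP.space, PP.cut, ...) and _fx_M3AstF... come from module Ast.
   After the remaining declarations, the definitions are mostly monomorphized
   one-liners (list length, string identity, attribute membership), followed
   by binop2str_/unop2str_, which map each C operator to its spelling, a
   numeric precedence (multiplicative 1200 down to assignment 100, unary
   1300) and the associativity used for parenthesization when printing. */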
FX_EXTERN_C int _fx_M2PPFM6beginvv2RM1ti(struct _fx_R5PP__t* pp_0, int_ indent_0, void* fx_fv); FX_EXTERN_C int _fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t( struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R18Options__options_t _fx_g12Options__opt) FX_EXTERN_C_VAL(struct _fx_R9Ast__id_t _fx_g9Ast__noid) FX_EXTERN_C int _fx_M3AstFM2ppS1RM4id_t(struct _fx_R9Ast__id_t* i_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM7replaceS3SSS( fx_str_t* s, fx_str_t* substr, fx_str_t* new_substr, fx_str_t* fx_result, void* fx_fv); static int _fx_M4C_ppFM16print_cascade_ifv5SN14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR5PP__t( fx_str_t* prefix_0, struct _fx_N14C_form__cexp_t_data_t* e_0, struct _fx_N15C_form__cstmt_t_data_t* s1_0, struct _fx_N15C_form__cstmt_t_data_t* s2_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM6breakuv1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_F7__mul__S2Ci(char_ c, int_ n, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM13get_idc_cnameS2R9Ast__id_tR10Ast__loc_t( struct _fx_R9Ast__id_t* i_0, struct _fx_R10Ast__loc_t* loc_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM21pprint_to_string_listRM1t2ii( int_ margin_0, int_ default_indent_0, struct _fx_R5PP__t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM5flushv1RM1t(struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_F12join_embraceS4SSSLS( fx_str_t* begin_0, fx_str_t* end_0, fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C void _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t( struct _fx_R9Ast__id_t* arg0, struct _fx_Nt6option1R9Ast__id_t* fx_result) { fx_result->tag = 2; fx_result->u.Some = *arg0; } FX_EXTERN_C int _fx_M4C_ppFM6__ne__B2R9Ast__id_tR9Ast__id_t( struct _fx_R9Ast__id_t* a_0, struct _fx_R9Ast__id_t* b_0, bool* fx_result, void* fx_fv) { int fx_status = 0; bool res_0; FX_CALL(_fx_M3AstFM6__eq__B2RM4id_tRM4id_t(a_0, b_0, &res_0, 0), _fx_cleanup); *fx_result = !res_0; _fx_cleanup: ; return fx_status; } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LS(struct _fx_LS_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int _fx_M4C_ppFM6stringS1S(fx_str_t* a_0, fx_str_t* fx_result, void* fx_fv) { int fx_status = 0; fx_copy_str(a_0, fx_result); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8length1_i1LN15C_form__cstmt_t( struct _fx_LN15C_form__cstmt_t_data_t* l_0, int_* fx_result, void* fx_fv) { int fx_status = 0; *fx_result = _fx_M4C_ppFM6lengthi1LN15C_form__cstmt_t(l_0, 0); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8length1_i1LS(struct _fx_LS_data_t* l_0, int_* fx_result, void* fx_fv) { int fx_status = 0; *fx_result = _fx_M4C_ppFM6lengthi1LS(l_0, 0); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8length1_i1LN14C_form__ctyp_t( struct _fx_LN14C_form__ctyp_t_data_t* l_0, int_* fx_result, void* fx_fv) { int fx_status = 0; *fx_result = _fx_M4C_ppFM6lengthi1LN14C_form__ctyp_t(l_0, 0); return 
/* membership test: true iff some attribute in the list has the same tag as a_0 */
FX_EXTERN_C int _fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(
    struct _fx_LN19C_form__ctyp_attr_t_data_t* l_0, struct _fx_N19C_form__ctyp_attr_t* a_0,
    bool* fx_result, void* fx_fv)
{
    int fx_status = 0;
    bool __fold_result___0 = false;
    _fx_LN19C_form__ctyp_attr_t lst_0 = l_0;
    for (; lst_0; lst_0 = lst_0->tl) {
        _fx_N19C_form__ctyp_attr_t* b_0 = &lst_0->hd;
        if (a_0->tag == b_0->tag) { __fold_result___0 = true; FX_BREAK(_fx_catch_0); }
    _fx_catch_0: ;
        FX_CHECK_BREAK();
        FX_CHECK_EXN(_fx_cleanup);
    }
    *fx_result = __fold_result___0;
_fx_cleanup: ;
    return fx_status;
}

/* binop2str_: maps each binary operator tag to (spelling, precedence, associativity).
   Tag 14 (empty spelling, postfix precedence 1400) is array indexing, printed as
   e1[e2] by pp_cexp_; tag 13 carries a nested COpCmp comparison tag. */
FX_EXTERN_C int _fx_M4C_ppFM10binop2str_T3SiN13C_pp__assoc_t1N17C_form__cbinary_t(
    struct _fx_N17C_form__cbinary_t* bop_0, struct _fx_T3SiN13C_pp__assoc_t* fx_result, void* fx_fv)
{
    int fx_status = 0;
    int tag_0 = bop_0->tag;
    if (tag_0 == 14) { fx_str_t slit_0 = FX_MAKE_STR("");   _fx_make_T3SiN13C_pp__assoc_t(&slit_0, 1400, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 3)  { fx_str_t slit_1 = FX_MAKE_STR("*");  _fx_make_T3SiN13C_pp__assoc_t(&slit_1, 1200, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 4)  { fx_str_t slit_2 = FX_MAKE_STR("/");  _fx_make_T3SiN13C_pp__assoc_t(&slit_2, 1200, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 5)  { fx_str_t slit_3 = FX_MAKE_STR("%");  _fx_make_T3SiN13C_pp__assoc_t(&slit_3, 1200, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 1)  { fx_str_t slit_4 = FX_MAKE_STR("+");  _fx_make_T3SiN13C_pp__assoc_t(&slit_4, 1100, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 2)  { fx_str_t slit_5 = FX_MAKE_STR("-");  _fx_make_T3SiN13C_pp__assoc_t(&slit_5, 1100, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 6)  { fx_str_t slit_6 = FX_MAKE_STR("<<"); _fx_make_T3SiN13C_pp__assoc_t(&slit_6, 1000, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 7)  { fx_str_t slit_7 = FX_MAKE_STR(">>"); _fx_make_T3SiN13C_pp__assoc_t(&slit_7, 1000, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 3) { fx_str_t slit_8 = FX_MAKE_STR("<");   _fx_make_T3SiN13C_pp__assoc_t(&slit_8, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } }
    if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 4) { fx_str_t slit_9 = FX_MAKE_STR("<=");  _fx_make_T3SiN13C_pp__assoc_t(&slit_9, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } }
    if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 6) { fx_str_t slit_10 = FX_MAKE_STR(">");  _fx_make_T3SiN13C_pp__assoc_t(&slit_10, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } }
    if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 5) { fx_str_t slit_11 = FX_MAKE_STR(">="); _fx_make_T3SiN13C_pp__assoc_t(&slit_11, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } }
    if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 1) { fx_str_t slit_12 = FX_MAKE_STR("=="); _fx_make_T3SiN13C_pp__assoc_t(&slit_12, 800, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } }
    if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 2) { fx_str_t slit_13 = FX_MAKE_STR("!="); _fx_make_T3SiN13C_pp__assoc_t(&slit_13, 800, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } }
    if (tag_0 == 8)  { fx_str_t slit_14 = FX_MAKE_STR("&");  _fx_make_T3SiN13C_pp__assoc_t(&slit_14, 700, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 10) { fx_str_t slit_15 = FX_MAKE_STR("^");  _fx_make_T3SiN13C_pp__assoc_t(&slit_15, 600, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 9)  { fx_str_t slit_16 = FX_MAKE_STR("|");  _fx_make_T3SiN13C_pp__assoc_t(&slit_16, 500, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 11) { fx_str_t slit_17 = FX_MAKE_STR("&&"); _fx_make_T3SiN13C_pp__assoc_t(&slit_17, 400, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 12) { fx_str_t slit_18 = FX_MAKE_STR("||"); _fx_make_T3SiN13C_pp__assoc_t(&slit_18, 300, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 15) { fx_str_t slit_19 = FX_MAKE_STR("=");  _fx_make_T3SiN13C_pp__assoc_t(&slit_19, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 16) { fx_str_t slit_20 = FX_MAKE_STR("+="); _fx_make_T3SiN13C_pp__assoc_t(&slit_20, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 17) { fx_str_t slit_21 = FX_MAKE_STR("-="); _fx_make_T3SiN13C_pp__assoc_t(&slit_21, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 18) { fx_str_t slit_22 = FX_MAKE_STR("*="); _fx_make_T3SiN13C_pp__assoc_t(&slit_22, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 19) { fx_str_t slit_23 = FX_MAKE_STR("/="); _fx_make_T3SiN13C_pp__assoc_t(&slit_23, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 20) { fx_str_t slit_24 = FX_MAKE_STR("%="); _fx_make_T3SiN13C_pp__assoc_t(&slit_24, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 21) { fx_str_t slit_25 = FX_MAKE_STR("<<="); _fx_make_T3SiN13C_pp__assoc_t(&slit_25, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 22) { fx_str_t slit_26 = FX_MAKE_STR(">>="); _fx_make_T3SiN13C_pp__assoc_t(&slit_26, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 23) { fx_str_t slit_27 = FX_MAKE_STR("&="); _fx_make_T3SiN13C_pp__assoc_t(&slit_27, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 24) { fx_str_t slit_28 = FX_MAKE_STR("|="); _fx_make_T3SiN13C_pp__assoc_t(&slit_28, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    if (tag_0 == 25) { fx_str_t slit_29 = FX_MAKE_STR("^="); _fx_make_T3SiN13C_pp__assoc_t(&slit_29, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; }
    FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup);
_fx_endmatch_0: ;
_fx_cleanup: ;
    return fx_status;
}

/* unop2str_: the same mapping for unary operators; see the note on prefix vs
   postfix forms just below */
FX_EXTERN_C int _fx_M4C_ppFM9unop2str_T3SiN13C_pp__assoc_t1N16C_form__cunary_t(
    struct _fx_N16C_form__cunary_t* uop_0, struct _fx_T3SiN13C_pp__assoc_t* fx_result, void* fx_fv)
{
    int fx_status = 0;
    int tag_0 = uop_0->tag;
    if (tag_0 == 1) { fx_str_t slit_0 = FX_MAKE_STR("+"); _fx_make_T3SiN13C_pp__assoc_t(&slit_0, 1300, &_fx_g16C_pp__AssocRight, fx_result); }
    else if (tag_0 == 2) { fx_str_t slit_1 = FX_MAKE_STR("-"); _fx_make_T3SiN13C_pp__assoc_t(&slit_1, 1300, &_fx_g16C_pp__AssocRight, fx_result); }
    else if (tag_0 == 3) { fx_str_t slit_2 = FX_MAKE_STR("~"); _fx_make_T3SiN13C_pp__assoc_t(&slit_2, 1300, &_fx_g16C_pp__AssocRight, fx_result); }
    else if (tag_0 == 4) { fx_str_t slit_3 = FX_MAKE_STR("!"); _fx_make_T3SiN13C_pp__assoc_t(&slit_3, 1300, &_fx_g16C_pp__AssocRight, fx_result); }
    else if (tag_0 == 5) { fx_str_t slit_4 = FX_MAKE_STR("*"); _fx_make_T3SiN13C_pp__assoc_t(&slit_4, 1300, &_fx_g16C_pp__AssocRight, fx_result); }
    else if (tag_0 == 6) { fx_str_t slit_5 = FX_MAKE_STR("&"); _fx_make_T3SiN13C_pp__assoc_t(&slit_5, 1300, &_fx_g16C_pp__AssocRight, fx_result); }
    else if (tag_0 == 7) {
        fx_str_t slit_6 = FX_MAKE_STR("++");
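        /* tags 1..8 are the prefix forms at precedence 1300, right-associative;
           tags 9 and 10 below are postfix ++/-- at 1400, left-associative */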
_fx_make_T3SiN13C_pp__assoc_t(&slit_6, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 8) { fx_str_t slit_7 = FX_MAKE_STR("--"); _fx_make_T3SiN13C_pp__assoc_t(&slit_7, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 9) { fx_str_t slit_8 = FX_MAKE_STR("++"); _fx_make_T3SiN13C_pp__assoc_t(&slit_8, 1400, &_fx_g15C_pp__AssocLeft, fx_result); } else if (tag_0 == 10) { fx_str_t slit_9 = FX_MAKE_STR("--"); _fx_make_T3SiN13C_pp__assoc_t(&slit_9, 1400, &_fx_g15C_pp__AssocLeft, fx_result); } else { FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); } _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t( struct _fx_R5PP__t* pp_0, struct _fx_R9Ast__id_t* n_0, struct _fx_R10Ast__loc_t* loc_0, void* fx_fv) { fx_str_t v_0 = {0}; int fx_status = 0; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(n_0, loc_0, &v_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_cleanup); _fx_cleanup: ; FX_FREE_STR(&v_0); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t( struct _fx_R5PP__t* pp_0, fx_str_t* prefix0_0, fx_str_t* suffix0_0, struct _fx_N14C_form__ctyp_t_data_t* t_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, bool fwd_mode_0, struct _fx_R10Ast__loc_t* loc_0, void* fx_fv) { int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_cleanup); int tag_0 = FX_REC_VARIANT_TAG(t_0); bool res_0; if (tag_0 == 1) { res_0 = true; } else if (tag_0 == 2) { res_0 = true; } else if (tag_0 == 3) { res_0 = true; } else if (tag_0 == 4) { res_0 = true; } else if (tag_0 == 5) { res_0 = true; } else if (tag_0 == 6) { res_0 = true; } else if (tag_0 == 11) { res_0 = true; } else if (tag_0 == 9) { res_0 = true; } else if (tag_0 == 8) { res_0 = true; } else if (tag_0 == 12) { res_0 = true; } else if (tag_0 == 10) { res_0 = true; } else if (tag_0 == 18) { res_0 = true; } else if (tag_0 == 19) { res_0 = true; } else { res_0 = false; } FX_CHECK_EXN(_fx_cleanup); if (res_0) { fx_str_t v_0 = {0}; FX_CALL(_fx_M6C_formFM9ctyp2str_S2N14C_form__ctyp_tR10Ast__loc_t(t_0, loc_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_0); goto _fx_endmatch_3; } if (tag_0 == 7) { fx_str_t slit_0 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_2); if (id_opt_0->tag == 2) { fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; fx_exn_t v_4 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_1, 0), _fx_catch_1); FX_CALL(_fx_M4C_ppFM6stringS1S(&v_1, &v_2, 0), _fx_catch_1); fx_str_t slit_1 = FX_MAKE_STR("c_pp.ml: void cannot be used with id \'"); fx_str_t slit_2 = FX_MAKE_STR("\'"); { const fx_str_t strs_0[] = { slit_1, v_2, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_3), _fx_catch_1); } FX_CALL(_fx_M3AstFM11compile_errE2RM5loc_tS(loc_0, &v_3, &v_4, 0), _fx_catch_1); FX_THROW(&v_4, false, _fx_catch_1); _fx_catch_1: ; fx_free_exn(&v_4); FX_FREE_STR(&v_3); FX_FREE_STR(&v_2); FX_FREE_STR(&v_1); } FX_CHECK_EXN(_fx_catch_2); _fx_catch_2: ; goto _fx_endmatch_3; } if (tag_0 == 15) { _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* vcase_0 = &t_0->u.CTypFunRawPtr; _fx_LN14C_form__ctyp_t args_0 = vcase_0->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_3 = 
FX_MAKE_STR(""); fx_str_t slit_4 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_3, &slit_4, vcase_0->t1, &_fx_g10C_pp__None, true, loc_0, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_5 = FX_MAKE_STR("(*"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_5, 0), _fx_catch_7); FX_CALL(_fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(false, id_opt_0, loc_0, pp_0, 0), _fx_catch_7); fx_str_t slit_6 = FX_MAKE_STR(")("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_6, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); if (args_0 == 0) { fx_str_t slit_7 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_7, 0), _fx_catch_3); _fx_catch_3: ; goto _fx_endmatch_0; } if (args_0 != 0) { if (args_0->tl == 0) { fx_str_t slit_8 = FX_MAKE_STR(""); fx_str_t slit_9 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_8, &slit_9, args_0->hd, &_fx_g10C_pp__None, true, loc_0, 0), _fx_catch_4); _fx_catch_4: ; goto _fx_endmatch_0; } } _fx_LN14C_form__ctyp_t args_1 = 0; int_ nargs_0; FX_CALL(_fx_M4C_ppFM8length1_i1LN14C_form__ctyp_t(args_0, &nargs_0, 0), _fx_catch_6); int_ i_0 = 0; FX_COPY_PTR(args_0, &args_1); _fx_LN14C_form__ctyp_t lst_0 = args_1; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { fx_str_t v_5 = {0}; _fx_N14C_form__ctyp_t ti_0 = lst_0->hd; bool last_0 = i_0 == nargs_0 - 1; if (last_0) { fx_str_t slit_10 = FX_MAKE_STR(""); fx_copy_str(&slit_10, &v_5); } else { fx_str_t slit_11 = FX_MAKE_STR(","); fx_copy_str(&slit_11, &v_5); } fx_str_t slit_12 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_12, &v_5, ti_0, &_fx_g10C_pp__None, true, loc_0, 0), _fx_catch_5); if (!last_0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_5); } _fx_catch_5: ; FX_FREE_STR(&v_5); FX_CHECK_EXN(_fx_catch_6); } _fx_catch_6: ; if (args_1) { _fx_free_LN14C_form__ctyp_t(&args_1); } _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_13 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_13, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); _fx_catch_7: ; goto _fx_endmatch_3; } if (tag_0 == 13) { fx_str_t v_6 = {0}; _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* vcase_1 = &t_0->u.CTypStruct; fx_str_t slit_14 = FX_MAKE_STR("struct"); { const fx_str_t strs_1[] = { *prefix0_0, slit_14 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_6), _fx_catch_8); } fx_str_t slit_15 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( &v_6, &vcase_1->t0, vcase_1->t1, &slit_15, id_opt_0, loc_0, pp_0, 0), _fx_catch_8); _fx_catch_8: ; FX_FREE_STR(&v_6); goto _fx_endmatch_3; } if (tag_0 == 16) { _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* vcase_2 = &t_0->u.CTypRawPtr; if (vcase_2->t0 == 0) { _fx_N14C_form__ctyp_t v_7 = vcase_2->t1; if (FX_REC_VARIANT_TAG(v_7) == 13) { fx_str_t suffix_0 = {0}; fx_str_t v_8 = {0}; _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* vcase_3 = &v_7->u.CTypStruct; _fx_Nt6option1R9Ast__id_t* n_opt_0 = &vcase_3->t0; if (n_opt_0->tag == 2) { fx_str_t v_9 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&n_opt_0->u.Some, loc_0, &v_9, 0), 
_fx_catch_9); fx_str_t slit_16 = FX_MAKE_STR(", *"); { const fx_str_t strs_2[] = { v_9, slit_16 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &suffix_0), _fx_catch_9); } _fx_catch_9: ; FX_FREE_STR(&v_9); } else { fx_str_t slit_17 = FX_MAKE_STR("*"); fx_copy_str(&slit_17, &suffix_0); } FX_CHECK_EXN(_fx_catch_10); fx_str_t slit_18 = FX_MAKE_STR("struct"); { const fx_str_t strs_3[] = { *prefix0_0, slit_18 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 2, &v_8), _fx_catch_10); } FX_CALL( _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( &v_8, n_opt_0, vcase_3->t1, &suffix_0, id_opt_0, loc_0, pp_0, 0), _fx_catch_10); _fx_catch_10: ; FX_FREE_STR(&v_8); FX_FREE_STR(&suffix_0); goto _fx_endmatch_3; } } } if (tag_0 == 14) { fx_str_t v_10 = {0}; _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* vcase_4 = &t_0->u.CTypUnion; fx_str_t slit_19 = FX_MAKE_STR("union"); { const fx_str_t strs_4[] = { *prefix0_0, slit_19 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 2, &v_10), _fx_catch_11); } fx_str_t slit_20 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( &v_10, &vcase_4->t0, vcase_4->t1, &slit_20, id_opt_0, loc_0, pp_0, 0), _fx_catch_11); _fx_catch_11: ; FX_FREE_STR(&v_10); goto _fx_endmatch_3; } if (tag_0 == 16) { _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* vcase_5 = &t_0->u.CTypRawPtr; _fx_LN19C_form__ctyp_attr_t attrs_0 = vcase_5->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_12); bool v_11; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_0, &_fx_g16C_pp__CTypStatic, &v_11, 0), _fx_catch_12); if (v_11) { fx_str_t slit_21 = FX_MAKE_STR("static "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_21, 0), _fx_catch_12); } bool v_12; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_0, &_fx_g18C_pp__CTypVolatile, &v_12, 0), _fx_catch_12); if (v_12) { fx_str_t slit_22 = FX_MAKE_STR("volatile "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_22, 0), _fx_catch_12); } fx_str_t slit_23 = FX_MAKE_STR(""); fx_str_t slit_24 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_23, &slit_24, vcase_5->t1, &_fx_g10C_pp__None, fwd_mode_0, loc_0, 0), _fx_catch_12); fx_str_t slit_25 = FX_MAKE_STR("*"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_25, 0), _fx_catch_12); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_12); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_12); _fx_catch_12: ; goto _fx_endmatch_3; } if (tag_0 == 17) { _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* vcase_6 = &t_0->u.CTypRawArray; _fx_LN19C_form__ctyp_attr_t attrs_1 = vcase_6->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_13); bool v_13; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_1, &_fx_g16C_pp__CTypStatic, &v_13, 0), _fx_catch_13); if (v_13) { fx_str_t slit_26 = FX_MAKE_STR("static "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_26, 0), _fx_catch_13); } bool v_14; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_1, &_fx_g18C_pp__CTypVolatile, &v_14, 0), _fx_catch_13); if (v_14) { fx_str_t slit_27 = FX_MAKE_STR("volatile "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_27, 0), _fx_catch_13); } bool v_15; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_1, &_fx_g15C_pp__CTypConst, &v_15, 0), 
_fx_catch_13); if (v_15) { fx_str_t slit_28 = FX_MAKE_STR("const "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_28, 0), _fx_catch_13); } fx_str_t slit_29 = FX_MAKE_STR(""); fx_str_t slit_30 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_29, &slit_30, vcase_6->t1, &_fx_g10C_pp__None, fwd_mode_0, loc_0, 0), _fx_catch_13); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_13); FX_CALL(_fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(false, id_opt_0, loc_0, pp_0, 0), _fx_catch_13); fx_str_t slit_31 = FX_MAKE_STR("[]"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_31, 0), _fx_catch_13); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_13); _fx_catch_13: ; goto _fx_endmatch_3; } if (tag_0 == 20) { _fx_R9Ast__id_t* n_0 = &t_0->u.CTypName; if (fwd_mode_0 == false) { FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, n_0, loc_0, 0), _fx_catch_14); _fx_catch_14: ; goto _fx_endmatch_2; } if (fwd_mode_0 == true) { if (n_0->m == 0) { FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, n_0, loc_0, 0), _fx_catch_15); _fx_catch_15: ; goto _fx_endmatch_2; } } _fx_N15C_form__cinfo_t v_16 = {0}; _fx_R17C_form__cdeftyp_t v_17 = {0}; _fx_R17C_form__cdeftyp_t v_18 = {0}; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(n_0, loc_0, &v_16, 0), _fx_catch_20); int tag_1 = v_16.tag; if (tag_1 == 4) { _fx_copy_R17C_form__cdeftyp_t(&v_16.u.CTyp->data, &v_17); _fx_N14C_form__ctyp_t v_19 = v_17.ct_typ; if (FX_REC_VARIANT_TAG(v_19) == 16) { _fx_N14C_form__ctyp_t v_20 = v_19->u.CTypRawPtr.t1; if (FX_REC_VARIANT_TAG(v_20) == 13) { _fx_Nt6option1R9Ast__id_t* v_21 = &v_20->u.CTypStruct.t0; if (v_21->tag == 2) { fx_str_t slit_32 = FX_MAKE_STR("struct "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_32, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &v_21->u.Some, loc_0, 0), _fx_catch_16); fx_str_t slit_33 = FX_MAKE_STR("*"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_33, 0), _fx_catch_16); _fx_catch_16: ; goto _fx_endmatch_1; } } } } if (tag_1 == 4) { _fx_copy_R17C_form__cdeftyp_t(&v_16.u.CTyp->data, &v_18); _fx_N14C_form__ctyp_t v_22 = v_18.ct_typ; if (FX_REC_VARIANT_TAG(v_22) == 13) { _fx_Nt6option1R9Ast__id_t* v_23 = &v_22->u.CTypStruct.t0; if (v_23->tag == 2) { fx_str_t slit_34 = FX_MAKE_STR("struct "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_34, 0), _fx_catch_17); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &v_23->u.Some, loc_0, 0), _fx_catch_17); _fx_catch_17: ; goto _fx_endmatch_1; } } } if (tag_1 == 6) { _fx_R23C_form__cdefinterface_t v_24 = {0}; fx_str_t v_25 = {0}; fx_str_t v_26 = {0}; _fx_copy_R23C_form__cdefinterface_t(&v_16.u.CInterface->data, &v_24); FX_CALL(_fx_M4C_ppFM6stringS1S(&v_24.ci_cname, &v_25, 0), _fx_catch_18); fx_str_t slit_35 = FX_MAKE_STR("struct "); { const fx_str_t strs_5[] = { slit_35, v_25 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 2, &v_26), _fx_catch_18); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_26, 0), _fx_catch_18); _fx_catch_18: ; FX_FREE_STR(&v_26); FX_FREE_STR(&v_25); _fx_free_R23C_form__cdefinterface_t(&v_24); goto _fx_endmatch_1; } if (FX_STR_LENGTH(*prefix0_0) != 0) { FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, prefix0_0, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_19); } FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, n_0, loc_0, 0), _fx_catch_19); _fx_catch_19: ; _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_20); _fx_catch_20: ; 
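/* end of the CTypName lookup: release the cinfo_/cdeftyp temporaries before
   falling through to the shared id-printing epilogue */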
_fx_free_R17C_form__cdeftyp_t(&v_18); _fx_free_R17C_form__cdeftyp_t(&v_17); _fx_free_N15C_form__cinfo_t(&v_16); _fx_endmatch_2: ; FX_CHECK_EXN(_fx_catch_21); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_21); _fx_catch_21: ; goto _fx_endmatch_3; } if (tag_0 == 21) { fx_str_t slit_36 = FX_MAKE_STR("/*<label>*/"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_36, 0), _fx_catch_22); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_22); _fx_catch_22: ; goto _fx_endmatch_3; } if (tag_0 == 22) { fx_str_t slit_37 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_37, 0), _fx_catch_23); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_23); _fx_catch_23: ; goto _fx_endmatch_3; } FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); _fx_endmatch_3: ; FX_CHECK_EXN(_fx_cleanup); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, suffix0_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; } static int _fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( bool add_space_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv) { int fx_status = 0; if (id_opt_0->tag == 2) { fx_str_t v_0 = {0}; if (add_space_0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_0); } FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_0); } return fx_status; } static int _fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv) { int fx_status = 0; if (id_opt_0->tag == 2) { fx_str_t v_0 = {0}; FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_0); FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_0); } return fx_status; } static int _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( fx_str_t* prefix_0, struct _fx_Nt6option1R9Ast__id_t* n_opt_0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* elems_0, fx_str_t* suffix_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv) { fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); fx_str_t slit_0 = FX_MAKE_STR(" "); { const fx_str_t strs_0[] = { *prefix_0, slit_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_0), _fx_cleanup); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_cleanup); if (n_opt_0->tag == 2) { fx_str_t v_2 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&n_opt_0->u.Some, loc_0, &v_2, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_2, 0), _fx_catch_0); fx_str_t slit_1 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_2); } FX_CHECK_EXN(_fx_cleanup); fx_str_t slit_2 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_cleanup); _fx_LT2R9Ast__id_tN14C_form__ctyp_t lst_0 = elems_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_N14C_form__ctyp_t ti_0 = 0; 
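        /* each struct/union member is printed as "<type> <name>;"; non-aggregate
           member types additionally get their own nested layout box */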
_fx_T2R9Ast__id_tN14C_form__ctyp_t* __pat___0 = &lst_0->hd; _fx_R9Ast__id_t ni_0 = __pat___0->t0; FX_COPY_PTR(__pat___0->t1, &ti_0); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_1); int tag_0 = FX_REC_VARIANT_TAG(ti_0); bool need_nested_box_0; bool res_0; if (tag_0 == 13) { res_0 = true; goto _fx_endmatch_0; } if (tag_0 == 14) { res_0 = true; goto _fx_endmatch_0; } if (tag_0 == 16) { if (FX_REC_VARIANT_TAG(ti_0->u.CTypRawPtr.t1) == 13) { res_0 = true; goto _fx_endmatch_0; } } res_0 = false; _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_1); if (res_0) { need_nested_box_0 = false; goto _fx_endmatch_1; } need_nested_box_0 = true; _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_1); if (need_nested_box_0) { FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_1); } _fx_Nt6option1R9Ast__id_t v_3; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(&ni_0, &v_3); fx_str_t slit_3 = FX_MAKE_STR(""); fx_str_t slit_4 = FX_MAKE_STR(";"); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_3, &slit_4, ti_0, &v_3, true, loc_0, 0), _fx_catch_1); if (need_nested_box_0) { FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_1); } _fx_catch_1: ; if (ti_0) { _fx_free_N14C_form__ctyp_t(&ti_0); } FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_M2PPFM8newlineuv1RM1t(pp_0, 0), _fx_cleanup); fx_str_t slit_5 = FX_MAKE_STR("} "); { const fx_str_t strs_1[] = { slit_5, *suffix_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_1), _fx_cleanup); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_1, 0), _fx_cleanup); if (id_opt_0->tag == 2) { fx_str_t v_4 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_4, 0), _fx_catch_2); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_4, 0), _fx_catch_2); _fx_catch_2: ; FX_FREE_STR(&v_4); } _fx_cleanup: ; FX_FREE_STR(&v_0); FX_FREE_STR(&v_1); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t( struct _fx_R5PP__t* pp_0, struct _fx_N14C_form__ctyp_t_data_t* t_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, void* fx_fv) { int fx_status = 0; fx_str_t slit_0 = FX_MAKE_STR(""); fx_str_t slit_1 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_0, &slit_1, t_0, id_opt_0, false, loc_0, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti( struct _fx_R5PP__t* pp_0, struct _fx_N14C_form__cexp_t_data_t* e_0, int_ pr_0, void* fx_fv) { int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); int tag_0 = FX_REC_VARIANT_TAG(e_0); if (tag_0 == 1) { _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_0 = &e_0->u.CExpIdent; FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_0->t0, &vcase_0->t1.t1, 0), _fx_catch_0); _fx_catch_0: ; goto _fx_endmatch_1; } if (tag_0 == 2) { _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_1 = &e_0->u.CExpLit; _fx_N14K_form__klit_t* l_0 = &vcase_1->t0; int tag_1 = l_0->tag; if (tag_1 == 8) { fx_str_t slit_0 = FX_MAKE_STR("0"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_1); _fx_catch_1: ; } else if (tag_1 == 6) { fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; int_ res_0; FX_CALL(_fx_F3ordi1C(l_0->u.KLitChar, &res_0, 0), _fx_catch_2); FX_CALL(_fx_F6stringS1i(res_0, &v_0, 0), _fx_catch_2); fx_str_t slit_1 = FX_MAKE_STR("(char_)"); { const fx_str_t strs_0[] = { slit_1, v_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_1), _fx_catch_2); 
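/* character literals are rendered as "(char_)<code point>" rather than as
   C character constants */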
} FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_1, 0), _fx_catch_2); _fx_catch_2: ; FX_FREE_STR(&v_1); FX_FREE_STR(&v_0); } else if (tag_1 == 5) { _fx_LS sl_0 = 0; fx_str_t v_2 = {0}; fx_str_t* s0_0 = &l_0->u.KLitString; FX_CALL(_fx_M6StringFM5splitLS3SCB(s0_0, (char_)10, true, &sl_0, 0), _fx_catch_4); if (sl_0 == 0) { FX_CALL(_fx_M6StringFM7escapedS2SB(s0_0, true, &v_2, 0), _fx_catch_4); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_2, 0), _fx_catch_4); } else { int_ n_0; FX_CALL(_fx_M4C_ppFM8length1_i1LS(sl_0, &n_0, 0), _fx_catch_4); int_ i_0 = 0; _fx_LS lst_0 = sl_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { fx_str_t s_0 = {0}; fx_str_t s_1 = {0}; fx_str_t v_3 = {0}; fx_str_t* s_2 = &lst_0->hd; bool v_4; if (i_0 < n_0 - 1) { v_4 = true; } else { v_4 = _fx_M6StringFM8endswithB2SC(s0_0, (char_)10, 0); } if (v_4) { fx_str_t slit_2 = FX_MAKE_STR("\n"); { const fx_str_t strs_1[] = { *s_2, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &s_0), _fx_catch_3); } } else { fx_copy_str(s_2, &s_0); } FX_CALL(_fx_M6StringFM7escapedS2SB(&s_0, true, &s_1, 0), _fx_catch_3); if (i_0 == 0) { FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_1, 0), _fx_catch_3); } else { FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_3); fx_str_t slit_3 = FX_MAKE_STR("U"); { const fx_str_t strs_2[] = { slit_3, s_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &v_3), _fx_catch_3); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_3, 0), _fx_catch_3); } _fx_catch_3: ; FX_FREE_STR(&v_3); FX_FREE_STR(&s_1); FX_FREE_STR(&s_0); FX_CHECK_EXN(_fx_catch_4); } } _fx_catch_4: ; FX_FREE_STR(&v_2); if (sl_0) { _fx_free_LS(&sl_0); } } else { fx_str_t v_5 = {0}; FX_CALL(_fx_M6K_formFM8klit2strS3N14K_form__klit_tBR10Ast__loc_t(l_0, true, &vcase_1->t1.t1, &v_5, 0), _fx_catch_5); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_5, 0), _fx_catch_5); _fx_catch_5: ; FX_FREE_STR(&v_5); } FX_CHECK_EXN(_fx_catch_6); _fx_catch_6: ; goto _fx_endmatch_1; } if (tag_0 == 3) { _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_2 = &e_0->u.CExpBinary; _fx_N17C_form__cbinary_t* bop_0 = &vcase_2->t0; if (bop_0->tag == 14) { _fx_T3SiN13C_pp__assoc_t v_6 = {0}; FX_CALL(_fx_M4C_ppFM10binop2str_T3SiN13C_pp__assoc_t1N17C_form__cbinary_t(bop_0, &v_6, 0), _fx_catch_7); int_ pr0_0 = v_6.t1; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_2->t1, pr0_0, 0), _fx_catch_7); fx_str_t slit_4 = FX_MAKE_STR("["); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_4, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_7); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_2->t2, 0, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_5 = FX_MAKE_STR("]"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_5, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); _fx_catch_7: ; _fx_free_T3SiN13C_pp__assoc_t(&v_6); goto _fx_endmatch_1; } } if (tag_0 == 3) { _fx_T3SiN13C_pp__assoc_t v_7 = {0}; fx_str_t bop_str_0 = {0}; _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_3 = &e_0->u.CExpBinary; _fx_N17C_form__cbinary_t* bop_1 = &vcase_3->t0; FX_CALL(_fx_M4C_ppFM10binop2str_T3SiN13C_pp__assoc_t1N17C_form__cbinary_t(bop_1, &v_7, 0), _fx_catch_8); fx_copy_str(&v_7.t0, &bop_str_0); int_ pr0_1 = v_7.t1; _fx_N13C_pp__assoc_t assoc_0 = v_7.t2; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_8); if (pr0_1 < pr_0) { fx_str_t slit_6 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, 
&slit_6, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_8); } bool is_shift_0; if (bop_1->tag == 6) { is_shift_0 = true; } else { is_shift_0 = bop_1->tag == 7; } int_ a_pr_0; if (is_shift_0) { a_pr_0 = 1350; } else if (assoc_0.tag == 1) { a_pr_0 = pr0_1; } else { a_pr_0 = pr0_1 + 1; } int_ b_pr_0; if (is_shift_0) { b_pr_0 = 1350; } else if (assoc_0.tag == 2) { b_pr_0 = pr0_1; } else { b_pr_0 = pr0_1 + 1; } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_3->t1, a_pr_0, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &bop_str_0, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_8); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_3->t2, b_pr_0, 0), _fx_catch_8); if (pr0_1 < pr_0) { FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_8); fx_str_t slit_7 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_7, 0), _fx_catch_8); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_8); _fx_catch_8: ; FX_FREE_STR(&bop_str_0); _fx_free_T3SiN13C_pp__assoc_t(&v_7); goto _fx_endmatch_1; } if (tag_0 == 4) { _fx_T3SiN13C_pp__assoc_t v_8 = {0}; fx_str_t uop_str_0 = {0}; _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_4 = &e_0->u.CExpUnary; _fx_N14C_form__cexp_t e_1 = vcase_4->t1; _fx_N16C_form__cunary_t* uop_0 = &vcase_4->t0; FX_CALL(_fx_M4C_ppFM9unop2str_T3SiN13C_pp__assoc_t1N16C_form__cunary_t(uop_0, &v_8, 0), _fx_catch_11); fx_copy_str(&v_8.t0, &uop_str_0); int_ pr0_2 = v_8.t1; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_11); if (pr0_2 < pr_0) { fx_str_t slit_8 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_8, 0), _fx_catch_11); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_11); } int tag_2 = uop_0->tag; bool res_1; if (tag_2 == 9) { res_1 = true; } else if (tag_2 == 10) { res_1 = true; } else { res_1 = false; } FX_CHECK_EXN(_fx_catch_11); if (res_1) { FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_1, pr0_2, 0), _fx_catch_9); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &uop_str_0, 0), _fx_catch_9); _fx_catch_9: ; goto _fx_endmatch_0; } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &uop_str_0, 0), _fx_catch_10); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_1, pr0_2, 0), _fx_catch_10); _fx_catch_10: ; _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_11); if (pr0_2 < pr_0) { FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_11); fx_str_t slit_9 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_9, 0), _fx_catch_11); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_11); _fx_catch_11: ; FX_FREE_STR(&uop_str_0); _fx_free_T3SiN13C_pp__assoc_t(&v_8); goto _fx_endmatch_1; } if (tag_0 == 5) { _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_5 = &e_0->u.CExpMem; FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_5->t0, 1400, 0), _fx_catch_12); fx_str_t slit_10 = FX_MAKE_STR("."); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_10, 0), _fx_catch_12); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_5->t1, &vcase_5->t2.t1, 0), _fx_catch_12); _fx_catch_12: ; goto _fx_endmatch_1; } if (tag_0 == 6) { _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_6 = &e_0->u.CExpArrow; FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_6->t0, 1400, 0), _fx_catch_13); fx_str_t slit_11 = FX_MAKE_STR("->"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_11, 0), _fx_catch_13); 
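/* the "->" case mirrors the "." member access above: the base expression is
   printed at postfix precedence 1400, followed by the field id */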
FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_6->t1, &vcase_6->t2.t1, 0), _fx_catch_13); _fx_catch_13: ; goto _fx_endmatch_1; } if (tag_0 == 7) { _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* vcase_7 = &e_0->u.CExpCast; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_14); fx_str_t slit_12 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_12, 0), _fx_catch_14); FX_CALL( _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, vcase_7->t1, &_fx_g10C_pp__None, &vcase_7->t2, 0), _fx_catch_14); fx_str_t slit_13 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_13, 0), _fx_catch_14); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_14); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_7->t0, 1301, 0), _fx_catch_14); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_14); _fx_catch_14: ; goto _fx_endmatch_1; } if (tag_0 == 8) { _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_8 = &e_0->u.CExpTernary; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_15); if (200 < pr_0) { fx_str_t slit_14 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_14, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_15); } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_8->t0, 0, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); fx_str_t slit_15 = FX_MAKE_STR("?"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_15, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_8->t1, 0, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); fx_str_t slit_16 = FX_MAKE_STR(":"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_16, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_8->t2, 0, 0), _fx_catch_15); if (200 < pr_0) { FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_15); fx_str_t slit_17 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_17, 0), _fx_catch_15); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_15); _fx_catch_15: ; goto _fx_endmatch_1; } if (tag_0 == 9) { _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_9 = &e_0->u.CExpCall; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_9->t0, 1400, 0), _fx_catch_16); fx_str_t slit_18 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_18, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t(pp_0, vcase_9->t1, 0), _fx_catch_16); fx_str_t slit_19 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_19, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_16); _fx_catch_16: ; goto _fx_endmatch_1; } if (tag_0 == 10) { _fx_LN14C_form__cexp_t eseq_0 = 0; _fx_LN14C_form__cexp_t eseq_1 = e_0->u.CExpInit.t0; if (eseq_1 != 0) { FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_20 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_20, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_18); int_ i_1 = 0; FX_COPY_PTR(eseq_1, &eseq_0); _fx_LN14C_form__cexp_t lst_1 = eseq_0; for (; lst_1; lst_1 = lst_1->tl, i_1 += 1) { _fx_N14C_form__cexp_t e_2 = lst_1->hd; if (i_1 > 0) { fx_str_t slit_21 = 
FX_MAKE_STR(","); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_21, 0), _fx_catch_17); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_17); } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_2, 0, 0), _fx_catch_17); _fx_catch_17: ; FX_CHECK_EXN(_fx_catch_18); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_22 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_22, 0), _fx_catch_18); } else { fx_str_t slit_23 = FX_MAKE_STR("{0}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_23, 0), _fx_catch_18); } _fx_catch_18: ; if (eseq_0) { _fx_free_LN14C_form__cexp_t(&eseq_0); } goto _fx_endmatch_1; } if (tag_0 == 11) { _fx_T2N14C_form__ctyp_tR10Ast__loc_t* vcase_10 = &e_0->u.CExpTyp; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_19); FX_CALL( _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, vcase_10->t0, &_fx_g10C_pp__None, &vcase_10->t1, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_19); _fx_catch_19: ; goto _fx_endmatch_1; } if (tag_0 == 12) { fx_str_t v_9 = {0}; fx_str_t v_10 = {0}; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_20); FX_CALL(_fx_M6StringFM5stripS1S(&e_0->u.CExpCCode.t0, &v_9, 0), _fx_catch_20); fx_str_t slit_24 = FX_MAKE_STR("\n"); fx_str_t slit_25 = FX_MAKE_STR("\n"); { const fx_str_t strs_3[] = { slit_24, v_9, slit_25 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_10), _fx_catch_20); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_10, 0), _fx_catch_20); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_20); _fx_catch_20: ; FX_FREE_STR(&v_10); FX_FREE_STR(&v_9); goto _fx_endmatch_1; } FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); _fx_endmatch_1: ; _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t( struct _fx_R5PP__t* pp_0, struct _fx_LN14C_form__cexp_t_data_t* el_0, void* fx_fv) { int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); int_ i_0 = 0; _fx_LN14C_form__cexp_t lst_0 = el_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { _fx_N14C_form__cexp_t e_0 = lst_0->hd; if (i_0 > 0) { fx_str_t slit_0 = FX_MAKE_STR(","); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_0); } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_0, 0, 0), _fx_catch_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_cleanup); } _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM14pprint_fun_hdrv5R5PP__tR9Ast__id_tBR10Ast__loc_tB( struct _fx_R5PP__t* pp_0, struct _fx_R9Ast__id_t* fname_0, bool semicolon_0, struct _fx_R10Ast__loc_t* loc_0, bool fwd_mode_0, void* fx_fv) { _fx_N15C_form__cinfo_t v_0 = {0}; _fx_R17C_form__cdeffun_t v_1 = {0}; fx_str_t cf_cname_0 = {0}; _fx_N14C_form__ctyp_t cf_rt_0 = 0; _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t cf_args_0 = 0; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; int fx_status = 0; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(fname_0, loc_0, &v_0, 0), _fx_cleanup); if (v_0.tag == 3) { _fx_copy_R17C_form__cdeffun_t(&v_0.u.CFun->data, &v_1); } else { fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_exn_t v_6 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(fname_0, loc_0, &v_4, 0), _fx_catch_0); fx_str_t slit_0 = FX_MAKE_STR("the forward declaration of "); fx_str_t slit_1 = FX_MAKE_STR(" does not reference a function"); { const fx_str_t strs_0[] = { slit_0, v_4, slit_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_5), _fx_catch_0); } 
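/* a forward declaration whose id does not resolve to a CFun entry raises a
   compile error with the message assembled above */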
FX_CALL(_fx_M3AstFM11compile_errE2RM5loc_tS(loc_0, &v_5, &v_6, 0), _fx_catch_0); FX_THROW(&v_6, false, _fx_catch_0); _fx_catch_0: ; fx_free_exn(&v_6); FX_FREE_STR(&v_5); FX_FREE_STR(&v_4); } FX_CHECK_EXN(_fx_cleanup); _fx_R10Ast__loc_t cf_loc_0 = v_1.cf_loc; _fx_R16Ast__fun_flags_t cf_flags_0 = v_1.cf_flags; fx_copy_str(&v_1.cf_cname, &cf_cname_0); FX_COPY_PTR(v_1.cf_rt, &cf_rt_0); FX_COPY_PTR(v_1.cf_args, &cf_args_0); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_cleanup); if (cf_flags_0.fun_flag_private) { fx_str_t slit_2 = FX_MAKE_STR("static "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_cleanup); } else { fx_str_t slit_3 = FX_MAKE_STR("FX_EXTERN_C "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_3, 0), _fx_cleanup); } fx_str_t slit_4 = FX_MAKE_STR(""); fx_str_t slit_5 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_4, &slit_5, cf_rt_0, &_fx_g10C_pp__None, false, &cf_loc_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &cf_cname_0, 0), _fx_cleanup); fx_str_t slit_6 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_6, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_cleanup); if (cf_args_0 == 0) { fx_str_t slit_7 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_7, 0), _fx_catch_1); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_1); _fx_catch_1: ; } else { int_ nargs_0 = _fx_M4C_ppFM6lengthi1LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(cf_args_0, 0); int_ i_0 = 0; _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t lst_0 = cf_args_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { _fx_N14C_form__ctyp_t t_0 = 0; fx_str_t v_7 = {0}; _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* __pat___0 = &lst_0->hd; _fx_R9Ast__id_t n_0 = __pat___0->t0; FX_COPY_PTR(__pat___0->t1, &t_0); bool last_0 = i_0 == nargs_0 - 1; if (last_0) { fx_str_t slit_8 = FX_MAKE_STR(""); fx_copy_str(&slit_8, &v_7); } else { fx_str_t slit_9 = FX_MAKE_STR(","); fx_copy_str(&slit_9, &v_7); } _fx_Nt6option1R9Ast__id_t v_8; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(&n_0, &v_8); fx_str_t slit_10 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_10, &v_7, t_0, &v_8, true, &cf_loc_0, 0), _fx_catch_2); if (!last_0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_2); } _fx_catch_2: ; FX_FREE_STR(&v_7); if (t_0) { _fx_free_N14C_form__ctyp_t(&t_0); } FX_CHECK_EXN(_fx_catch_3); } _fx_catch_3: ; } FX_CHECK_EXN(_fx_cleanup); if (semicolon_0) { fx_str_t slit_11 = FX_MAKE_STR(";"); fx_copy_str(&slit_11, &v_2); } else { fx_str_t slit_12 = FX_MAKE_STR(""); fx_copy_str(&slit_12, &v_2); } fx_str_t slit_13 = FX_MAKE_STR(")"); { const fx_str_t strs_1[] = { slit_13, v_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_3), _fx_cleanup); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_3, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_cleanup); _fx_cleanup: ; _fx_free_N15C_form__cinfo_t(&v_0); _fx_free_R17C_form__cdeffun_t(&v_1); FX_FREE_STR(&cf_cname_0); if (cf_rt_0) { _fx_free_N14C_form__ctyp_t(&cf_rt_0); } if (cf_args_0) { _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&cf_args_0); } FX_FREE_STR(&v_2); FX_FREE_STR(&v_3); return fx_status; } FX_EXTERN_C int 
_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(
    struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv)
{
    _fx_LN15C_form__cstmt_t sl_0 = 0;
    int fx_status = 0;
    FX_CALL(fx_check_stack(), _fx_cleanup);
    int tag_0 = FX_REC_VARIANT_TAG(s_0);
    if (tag_0 == 7) {
        FX_COPY_PTR(s_0->u.CStmtBlock.t0, &sl_0);
    } else if (tag_0 != 1) {
        FX_CALL(_fx_cons_LN15C_form__cstmt_t(s_0, 0, true, &sl_0), _fx_catch_0);
    _fx_catch_0: ;
    }
    FX_CHECK_EXN(_fx_cleanup);
    fx_str_t slit_0 = FX_MAKE_STR("{");
    FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_cleanup);
    FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_cleanup);
    FX_CALL(_fx_M2PPFM6beginvv2RM1ti(pp_0, 0, 0), _fx_cleanup);
    int_ i_0 = 0;
    _fx_LN15C_form__cstmt_t lst_0 = sl_0;
    for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) {
        _fx_N15C_form__cstmt_t s_1 = lst_0->hd;
        if (i_0 > 0) {
            FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_1);
        }
        FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_1, 0), _fx_catch_1);
    _fx_catch_1: ;
        FX_CHECK_EXN(_fx_cleanup);
    }
    FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup);
    FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup);
    FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_cleanup);
    fx_str_t slit_1 = FX_MAKE_STR("}");
    FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_cleanup);
_fx_cleanup: ;
    if (sl_0) {
        _fx_free_LN15C_form__cstmt_t(&sl_0);
    }
    return fx_status;
}

FX_EXTERN_C int _fx_M4C_ppFM21pprint_cstmt_as_blockv2R5PP__tN15C_form__cstmt_t(
    struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv)
{
    _fx_LN15C_form__cstmt_t sl_0 = 0;
    int fx_status = 0;
    FX_CALL(fx_check_stack(), _fx_cleanup);
    int tag_0 = FX_REC_VARIANT_TAG(s_0);
    if (tag_0 == 7) {
        FX_COPY_PTR(s_0->u.CStmtBlock.t0, &sl_0);
    } else if (tag_0 != 1) {
        FX_CALL(_fx_cons_LN15C_form__cstmt_t(s_0, 0, true, &sl_0), _fx_catch_0);
    _fx_catch_0: ;
    }
    FX_CHECK_EXN(_fx_cleanup);
    if (sl_0 == 0) {
        fx_str_t slit_0 = FX_MAKE_STR("{}");
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_1);
    _fx_catch_1: ;
    } else {
        FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_3);
        fx_str_t slit_1 = FX_MAKE_STR("{");
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_3);
        int_ i_0 = 0;
        _fx_LN15C_form__cstmt_t lst_0 = sl_0;
        for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) {
            _fx_N15C_form__cstmt_t s_1 = lst_0->hd;
            FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_2);
            FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_1, 0), _fx_catch_2);
        _fx_catch_2: ;
            FX_CHECK_EXN(_fx_catch_3);
        }
        FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_3);
        FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_3);
        fx_str_t slit_2 = FX_MAKE_STR("}");
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_catch_3);
    _fx_catch_3: ;
    }
_fx_cleanup: ;
    if (sl_0) {
        _fx_free_LN15C_form__cstmt_t(&sl_0);
    }
    return fx_status;
}

FX_EXTERN_C int _fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(
    struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv)
{
    int fx_status = 0;
    FX_CALL(fx_check_stack(), _fx_cleanup);
    int tag_0 = FX_REC_VARIANT_TAG(s_0);
    if (tag_0 == 1) {
        fx_str_t slit_0 = FX_MAKE_STR("{}");
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_0);
    _fx_catch_0: ;
    } else if (tag_0 == 2) {
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_0->u.CComment.t0, 0), _fx_catch_1);
    _fx_catch_1: ;
    } else if (tag_0 == 3) {
        _fx_N14C_form__cexp_t e_0 = s_0->u.CExp;
        FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_0, 0, 0), _fx_catch_3);
        if (FX_REC_VARIANT_TAG(e_0) != 12) {
            fx_str_t slit_1 = FX_MAKE_STR(";");
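            /* expression statements get a trailing ";"; inline C code fragments
               (tag 12, CExpCCode) are emitted without one */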
FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_2); _fx_catch_2: ; } FX_CHECK_EXN(_fx_catch_3); _fx_catch_3: ; } else if (tag_0 == 4) { fx_str_t slit_2 = FX_MAKE_STR("break;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_catch_4); _fx_catch_4: ; } else if (tag_0 == 5) { fx_str_t slit_3 = FX_MAKE_STR("continue;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_3, 0), _fx_catch_5); _fx_catch_5: ; } else if (tag_0 == 6) { _fx_Nt6option1N14C_form__cexp_t* e_opt_0 = &s_0->u.CStmtReturn.t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_4 = FX_MAKE_STR("return"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_4, 0), _fx_catch_7); if (e_opt_0->tag == 2) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_6); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_opt_0->u.Some, 0, 0), _fx_catch_6); _fx_catch_6: ; } FX_CHECK_EXN(_fx_catch_7); fx_str_t slit_5 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_5, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); _fx_catch_7: ; } else if (tag_0 == 7) { FX_CALL(_fx_M4C_ppFM21pprint_cstmt_as_blockv2R5PP__tN15C_form__cstmt_t(pp_0, s_0, 0), _fx_catch_8); _fx_catch_8: ; } else if (tag_0 == 8) { fx_str_t v_0 = {0}; fx_str_t nstr_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; _fx_T2R9Ast__id_tN15C_form__cstmt_t* vcase_0 = &s_0->u.CStmtSync; _fx_R9Ast__id_t* n_0 = &vcase_0->t0; if (_fx_g12Options__opt.enable_openmp) { FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_9); fx_str_t slit_6 = FX_MAKE_STR("#pragma omp critical"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_6, 0), _fx_catch_9); bool res_0; FX_CALL(_fx_M4C_ppFM6__ne__B2R9Ast__id_tR9Ast__id_t(n_0, &_fx_g9Ast__noid, &res_0, 0), _fx_catch_9); if (res_0) { FX_CALL(_fx_M3AstFM2ppS1RM4id_t(n_0, &v_0, 0), _fx_catch_9); fx_str_t slit_7 = FX_MAKE_STR("."); fx_str_t slit_8 = FX_MAKE_STR("__"); FX_CALL(_fx_M6StringFM7replaceS3SSS(&v_0, &slit_7, &slit_8, &nstr_0, 0), _fx_catch_9); FX_CALL(_fx_M4C_ppFM6stringS1S(&nstr_0, &v_1, 0), _fx_catch_9); fx_str_t slit_9 = FX_MAKE_STR(" ("); fx_str_t slit_10 = FX_MAKE_STR(")"); { const fx_str_t strs_0[] = { slit_9, v_1, slit_10 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_2), _fx_catch_9); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_2, 0), _fx_catch_9); } FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_9); } FX_CALL(_fx_M4C_ppFM21pprint_cstmt_as_blockv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_0->t1, 0), _fx_catch_9); _fx_catch_9: ; FX_FREE_STR(&v_2); FX_FREE_STR(&v_1); FX_FREE_STR(&nstr_0); FX_FREE_STR(&v_0); } else if (tag_0 == 9) { _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* vcase_1 = &s_0->u.CStmtIf; fx_str_t slit_11 = FX_MAKE_STR("if"); FX_CALL( _fx_M4C_ppFM16print_cascade_ifv5SN14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR5PP__t(&slit_11, vcase_1->t0, vcase_1->t1, vcase_1->t2, pp_0, 0), _fx_catch_10); _fx_catch_10: ; } else if (tag_0 == 10) { _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_2 = &s_0->u.CStmtGoto; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_11); fx_str_t slit_12 = FX_MAKE_STR("goto"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_12, 0), _fx_catch_11); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_11); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_2->t0, &vcase_2->t1, 0), _fx_catch_11); fx_str_t slit_13 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_13, 0), _fx_catch_11); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_11); _fx_catch_11: ; } else if (tag_0 == 11) { _fx_T2R9Ast__id_tR10Ast__loc_t* 
vcase_3 = &s_0->u.CStmtLabel; FX_CALL(_fx_M2PPFM6breakuv1RM1t(pp_0, 0), _fx_catch_12); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_3->t0, &vcase_3->t1, 0), _fx_catch_12); fx_str_t slit_14 = FX_MAKE_STR(": ;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_14, 0), _fx_catch_12); _fx_catch_12: ; } else if (tag_0 == 12) { _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* vcase_4 = &s_0->u.CStmtFor; _fx_LN14C_form__cexp_t e3_0 = vcase_4->t3; _fx_Nt6option1N14C_form__cexp_t* e2_opt_0 = &vcase_4->t2; _fx_LN14C_form__cexp_t e1_0 = vcase_4->t1; _fx_Nt6option1N14C_form__ctyp_t* t_opt_0 = &vcase_4->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_17); fx_str_t slit_15 = FX_MAKE_STR("for ("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_15, 0), _fx_catch_17); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_17); if (e1_0 != 0) { if (t_opt_0->tag == 2) { FX_CALL( _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, t_opt_0->u.Some, &_fx_g10C_pp__None, &vcase_4->t5, 0), _fx_catch_13); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_13); _fx_catch_13: ; } FX_CHECK_EXN(_fx_catch_14); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_14); FX_CALL(_fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t(pp_0, e1_0, 0), _fx_catch_14); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_14); _fx_catch_14: ; } FX_CHECK_EXN(_fx_catch_17); fx_str_t slit_16 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_16, 0), _fx_catch_17); if (e2_opt_0->tag == 2) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e2_opt_0->u.Some, 0, 0), _fx_catch_15); _fx_catch_15: ; } FX_CHECK_EXN(_fx_catch_17); fx_str_t slit_17 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_17, 0), _fx_catch_17); if (e3_0 != 0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t(pp_0, e3_0, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_16); _fx_catch_16: ; } FX_CHECK_EXN(_fx_catch_17); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_17); fx_str_t slit_18 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_18, 0), _fx_catch_17); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_17); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_4->t4, 0), _fx_catch_17); _fx_catch_17: ; } else if (tag_0 == 13) { _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* vcase_5 = &s_0->u.CStmtWhile; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_19 = FX_MAKE_STR("while ("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_19, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_18); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_5->t0, 0, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_20 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_20, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_18); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_5->t1, 0), _fx_catch_18); _fx_catch_18: ; } else if (tag_0 == 14) { _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* vcase_6 = &s_0->u.CStmtDoWhile; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_19); fx_str_t slit_21 = FX_MAKE_STR("do"); 
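/* do-while: "do", the body as a block, then "while (<cond>);" */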
FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_21, 0), _fx_catch_19); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_6->t0, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_19); fx_str_t slit_22 = FX_MAKE_STR("while ("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_22, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_19); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_6->t1, 0, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_19); fx_str_t slit_23 = FX_MAKE_STR(");"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_23, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_19); _fx_catch_19: ; } else if (tag_0 == 15) { _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t cases_0 = 0; _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* vcase_7 = &s_0->u.CStmtSwitch; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_25); fx_str_t slit_24 = FX_MAKE_STR("switch ("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_24, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_25); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_7->t0, 0, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_25); fx_str_t slit_25 = FX_MAKE_STR(") {"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_25, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_25); FX_COPY_PTR(vcase_7->t1, &cases_0); _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t lst_0 = cases_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_LN14C_form__cexp_t labels_0 = 0; _fx_LN15C_form__cstmt_t code_0 = 0; fx_str_t v_3 = {0}; fx_str_t v_4 = {0}; _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* __pat___0 = &lst_0->hd; FX_COPY_PTR(__pat___0->t0, &labels_0); FX_COPY_PTR(__pat___0->t1, &code_0); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_24); bool isdefault_0; if (labels_0 == 0) { fx_str_t slit_26 = FX_MAKE_STR("default:"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_26, 0), _fx_catch_20); isdefault_0 = true; _fx_catch_20: ; } else { _fx_LN14C_form__cexp_t lst_1 = labels_0; for (; lst_1; lst_1 = lst_1->tl) { _fx_N14C_form__cexp_t l_0 = lst_1->hd; fx_str_t slit_27 = FX_MAKE_STR("case "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_27, 0), _fx_catch_21); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, l_0, 0, 0), _fx_catch_21); fx_str_t slit_28 = FX_MAKE_STR(":"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_28, 0), _fx_catch_21); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_21); _fx_catch_21: ; FX_CHECK_EXN(_fx_catch_22); } isdefault_0 = false; _fx_catch_22: ; } FX_CHECK_EXN(_fx_catch_24); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_24); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_24); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_24); int_ v_5; FX_CALL(_fx_M4C_ppFM8length1_i1LN15C_form__cstmt_t(code_0, &v_5, 0), _fx_catch_24); int_ t_0; if (isdefault_0) { t_0 = 0; } else { t_0 = 1; } int_ codelen_0 = v_5 + t_0; int_ i_0 = 0; _fx_LN15C_form__cstmt_t lst_2 = code_0; for (; lst_2; lst_2 = lst_2->tl, i_0 += 1) { _fx_N15C_form__cstmt_t s_1 = lst_2->hd; if (i_0 == 0) { fx_str_t slit_29 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_29, 0), _fx_catch_23); } FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_1, 0), _fx_catch_23); if (i_0 < codelen_0 - 1) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_23); } _fx_catch_23: ; FX_CHECK_EXN(_fx_catch_24); } if 
(isdefault_0) { if (code_0 == 0) { FX_CALL(_fx_F7__mul__S2Ci((char_)32, 3, &v_3, 0), _fx_catch_24); fx_str_t slit_30 = FX_MAKE_STR(";"); { const fx_str_t strs_1[] = { v_3, slit_30 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_4), _fx_catch_24); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_4, 0), _fx_catch_24); } } else { fx_str_t slit_31 = FX_MAKE_STR("break;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_31, 0), _fx_catch_24); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_24); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_24); _fx_catch_24: ; FX_FREE_STR(&v_4); FX_FREE_STR(&v_3); if (code_0) { _fx_free_LN15C_form__cstmt_t(&code_0); } if (labels_0) { _fx_free_LN14C_form__cexp_t(&labels_0); } FX_CHECK_EXN(_fx_catch_25); } fx_str_t slit_32 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_32, 0), _fx_catch_25); _fx_catch_25: ; if (cases_0) { _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&cases_0); } } else if (tag_0 == 16) { _fx_N15C_form__cinfo_t v_6 = {0}; _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* vcase_8 = &s_0->u.CDefVal; _fx_R10Ast__loc_t* loc_0 = &vcase_8->t3; _fx_Nt6option1N14C_form__cexp_t* e_opt_1 = &vcase_8->t2; _fx_R9Ast__id_t* n_1 = &vcase_8->t1; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(n_1, loc_0, &v_6, 0), _fx_catch_27); bool is_private_0; if (v_6.tag == 2) { is_private_0 = v_6.u.CVal.cv_flags.val_flag_private; } else { is_private_0 = false; } FX_CHECK_EXN(_fx_catch_27); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_27); if (is_private_0) { fx_str_t slit_33 = FX_MAKE_STR("static"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_33, 0), _fx_catch_27); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_27); } _fx_Nt6option1R9Ast__id_t v_7; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(n_1, &v_7); FX_CALL( _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, vcase_8->t0, &v_7, loc_0, 0), _fx_catch_27); if (e_opt_1->tag == 2) { fx_str_t slit_34 = FX_MAKE_STR(" ="); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_34, 0), _fx_catch_26); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_26); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_opt_1->u.Some, 0, 0), _fx_catch_26); _fx_catch_26: ; } FX_CHECK_EXN(_fx_catch_27); fx_str_t slit_35 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_35, 0), _fx_catch_27); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_27); _fx_catch_27: ; _fx_free_N15C_form__cinfo_t(&v_6); } else if (tag_0 == 17) { _fx_LN15C_form__cstmt_t cf_body_0 = 0; _fx_R17C_form__cdeffun_t* v_8 = &s_0->u.CDefFun->data; _fx_R10Ast__loc_t cf_loc_0 = v_8->cf_loc; FX_COPY_PTR(v_8->cf_body, &cf_body_0); _fx_R9Ast__id_t cf_name_0 = v_8->cf_name; FX_CALL(_fx_M4C_ppFM14pprint_fun_hdrv5R5PP__tR9Ast__id_tBR10Ast__loc_tB(pp_0, &cf_name_0, false, &cf_loc_0, false, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_29); fx_str_t slit_36 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_36, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_29); int_ i_1 = 0; _fx_LN15C_form__cstmt_t lst_3 = cf_body_0; for (; lst_3; lst_3 = lst_3->tl, i_1 += 1) { _fx_N15C_form__cstmt_t s_2 = lst_3->hd; if (i_1 > 0) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_28); } FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_2, 0), _fx_catch_28); _fx_catch_28: ; FX_CHECK_EXN(_fx_catch_29); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), 
_fx_catch_29); fx_str_t slit_37 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_37, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_29); _fx_catch_29: ; if (cf_body_0) { _fx_free_LN15C_form__cstmt_t(&cf_body_0); } } else if (tag_0 == 19) { _fx_N15C_form__cinfo_t v_9 = {0}; _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_9 = &s_0->u.CDefForwardSym; _fx_R10Ast__loc_t* cf_loc_1 = &vcase_9->t1; _fx_R9Ast__id_t* cf_name_1 = &vcase_9->t0; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(cf_name_1, cf_loc_1, &v_9, 0), _fx_catch_33); int tag_1 = v_9.tag; if (tag_1 == 3) { FX_CALL(_fx_M4C_ppFM14pprint_fun_hdrv5R5PP__tR9Ast__id_tBR10Ast__loc_tB(pp_0, cf_name_1, true, cf_loc_1, true, 0), _fx_catch_30); _fx_catch_30: ; } else if (tag_1 == 2) { FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_31); fx_str_t slit_38 = FX_MAKE_STR("FX_EXTERN_C_VAL("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_38, 0), _fx_catch_31); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_31); _fx_Nt6option1R9Ast__id_t v_10; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(cf_name_1, &v_10); fx_str_t slit_39 = FX_MAKE_STR(""); fx_str_t slit_40 = FX_MAKE_STR(")"); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_39, &slit_40, v_9.u.CVal.cv_typ, &v_10, true, cf_loc_1, 0), _fx_catch_31); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_31); _fx_catch_31: ; } else { fx_str_t v_11 = {0}; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; fx_exn_t v_14 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(cf_name_1, cf_loc_1, &v_11, 0), _fx_catch_32); FX_CALL(_fx_M4C_ppFM6stringS1S(&v_11, &v_12, 0), _fx_catch_32); fx_str_t slit_41 = FX_MAKE_STR("the forward declaration of "); fx_str_t slit_42 = FX_MAKE_STR(" does not reference a function or a value"); { const fx_str_t strs_2[] = { slit_41, v_12, slit_42 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_13), _fx_catch_32); } FX_CALL(_fx_M3AstFM11compile_errE2RM5loc_tS(cf_loc_1, &v_13, &v_14, 0), _fx_catch_32); FX_THROW(&v_14, false, _fx_catch_32); _fx_catch_32: ; fx_free_exn(&v_14); FX_FREE_STR(&v_13); FX_FREE_STR(&v_12); FX_FREE_STR(&v_11); } FX_CHECK_EXN(_fx_catch_33); _fx_catch_33: ; _fx_free_N15C_form__cinfo_t(&v_9); } else if (tag_0 == 18) { _fx_N14C_form__ctyp_t ct_typ_0 = 0; _fx_R17C_form__cdeftyp_t* v_15 = &s_0->u.CDefTyp->data; _fx_R10Ast__loc_t ct_loc_0 = v_15->ct_loc; FX_COPY_PTR(v_15->ct_typ, &ct_typ_0); _fx_R9Ast__id_t ct_name_0 = v_15->ct_name; _fx_Nt6option1R9Ast__id_t v_16; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(&ct_name_0, &v_16); fx_str_t slit_43 = FX_MAKE_STR("typedef "); fx_str_t slit_44 = FX_MAKE_STR(";"); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_43, &slit_44, ct_typ_0, &v_16, true, &ct_loc_0, 0), _fx_catch_34); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_34); _fx_catch_34: ; if (ct_typ_0) { _fx_free_N14C_form__ctyp_t(&ct_typ_0); } } else if (tag_0 == 20) { _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_10 = &s_0->u.CDefForwardTyp; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_35); fx_str_t slit_45 = FX_MAKE_STR("struct "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_45, 0), _fx_catch_35); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_10->t0, &vcase_10->t1, 0), _fx_catch_35); fx_str_t slit_46 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_46, 0), _fx_catch_35); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_35); 
FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_35); _fx_catch_35: ; } else if (tag_0 == 21) { _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t cenum_members_0 = 0; fx_str_t cenum_cname_0 = {0}; _fx_R18C_form__cdefenum_t* v_17 = &s_0->u.CDefEnum->data; _fx_R10Ast__loc_t cenum_loc_0 = v_17->cenum_loc; FX_COPY_PTR(v_17->cenum_members, &cenum_members_0); fx_copy_str(&v_17->cenum_cname, &cenum_cname_0); fx_str_t slit_47 = FX_MAKE_STR("typedef enum {"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_47, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_38); int_ i_2 = 0; _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t lst_4 = cenum_members_0; for (; lst_4; lst_4 = lst_4->tl, i_2 += 1) { _fx_Nt6option1N14C_form__cexp_t e_opt_2 = {0}; _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* __pat___1 = &lst_4->hd; _fx_R9Ast__id_t n_2 = __pat___1->t0; _fx_copy_Nt6option1N14C_form__cexp_t(&__pat___1->t1, &e_opt_2); if (i_2 == 0) { fx_str_t slit_48 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_48, 0), _fx_catch_37); } else { fx_str_t slit_49 = FX_MAKE_STR(","); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_49, 0), _fx_catch_37); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_37); } FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &n_2, &cenum_loc_0, 0), _fx_catch_37); if (e_opt_2.tag == 2) { fx_str_t slit_50 = FX_MAKE_STR("="); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_50, 0), _fx_catch_36); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_opt_2.u.Some, 0, 0), _fx_catch_36); _fx_catch_36: ; } FX_CHECK_EXN(_fx_catch_37); _fx_catch_37: ; _fx_free_Nt6option1N14C_form__cexp_t(&e_opt_2); FX_CHECK_EXN(_fx_catch_38); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_38); fx_str_t slit_51 = FX_MAKE_STR("} "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_51, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &cenum_cname_0, 0), _fx_catch_38); fx_str_t slit_52 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_52, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_38); _fx_catch_38: ; FX_FREE_STR(&cenum_cname_0); if (cenum_members_0) { _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&cenum_members_0); } } else if (tag_0 == 22) { fx_str_t ci_cname_0 = {0}; fx_str_t v_18 = {0}; fx_str_t v_19 = {0}; fx_str_t vtbl_cname_0 = {0}; fx_str_t v_20 = {0}; fx_str_t v_21 = {0}; _fx_R23C_form__cdefinterface_t* v_22 = &s_0->u.CDefInterface->data; _fx_R10Ast__loc_t ci_loc_0 = v_22->ci_loc; _fx_R9Ast__id_t ci_vtbl_0 = v_22->ci_vtbl; fx_copy_str(&v_22->ci_cname, &ci_cname_0); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M4C_ppFM6stringS1S(&ci_cname_0, &v_18, 0), _fx_catch_39); fx_str_t slit_53 = FX_MAKE_STR("typedef struct "); fx_str_t slit_54 = FX_MAKE_STR(" {"); { const fx_str_t strs_3[] = { slit_53, v_18, slit_54 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_19), _fx_catch_39); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_19, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M6C_formFM13get_idc_cnameS2R9Ast__id_tR10Ast__loc_t(&ci_vtbl_0, &ci_loc_0, &vtbl_cname_0, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &vtbl_cname_0, 0), _fx_catch_39); fx_str_t slit_55 = FX_MAKE_STR("* vtbl;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_55, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); fx_str_t slit_56 = FX_MAKE_STR("fx_object_t* obj;"); 
FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_56, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M4C_ppFM6stringS1S(&ci_cname_0, &v_20, 0), _fx_catch_39); fx_str_t slit_57 = FX_MAKE_STR("} "); fx_str_t slit_58 = FX_MAKE_STR(";"); { const fx_str_t strs_4[] = { slit_57, v_20, slit_58 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &v_21), _fx_catch_39); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_21, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); _fx_catch_39: ; FX_FREE_STR(&v_21); FX_FREE_STR(&v_20); FX_FREE_STR(&vtbl_cname_0); FX_FREE_STR(&v_19); FX_FREE_STR(&v_18); FX_FREE_STR(&ci_cname_0); } else if (tag_0 == 23) { _fx_LN15C_form__cstmt_t cm_body_0 = 0; _fx_LR9Ast__id_t cm_args_0 = 0; fx_str_t cm_cname_0 = {0}; _fx_R19C_form__cdefmacro_t* v_23 = &s_0->u.CMacroDef->data; _fx_R10Ast__loc_t cm_loc_0 = v_23->cm_loc; FX_COPY_PTR(v_23->cm_body, &cm_body_0); FX_COPY_PTR(v_23->cm_args, &cm_args_0); fx_copy_str(&v_23->cm_cname, &cm_cname_0); fx_str_t slit_59 = FX_MAKE_STR("#define "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_59, 0), _fx_catch_44); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &cm_cname_0, 0), _fx_catch_44); if (cm_args_0 != 0) { fx_str_t slit_60 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_60, 0), _fx_catch_41); int_ i_3 = 0; _fx_LR9Ast__id_t lst_5 = cm_args_0; for (; lst_5; lst_5 = lst_5->tl, i_3 += 1) { _fx_R9Ast__id_t* a_0 = &lst_5->hd; if (i_3 > 0) { fx_str_t slit_61 = FX_MAKE_STR(", "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_61, 0), _fx_catch_40); } FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, a_0, &cm_loc_0, 0), _fx_catch_40); _fx_catch_40: ; FX_CHECK_EXN(_fx_catch_41); } fx_str_t slit_62 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_62, 0), _fx_catch_41); _fx_catch_41: ; } FX_CHECK_EXN(_fx_catch_44); if (cm_body_0 != 0) { _fx_LN15C_form__cstmt_t lst_6 = cm_body_0; for (; lst_6; lst_6 = lst_6->tl) { _fx_N15C_form__cstmt_t s_3 = lst_6->hd; FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_42); fx_str_t slit_63 = FX_MAKE_STR("\\"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_63, 0), _fx_catch_42); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_42); fx_str_t slit_64 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_64, 0), _fx_catch_42); FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_3, 0), _fx_catch_42); _fx_catch_42: ; FX_CHECK_EXN(_fx_catch_43); } _fx_catch_43: ; } FX_CHECK_EXN(_fx_catch_44); _fx_catch_44: ; FX_FREE_STR(&cm_cname_0); FX_FREE_LIST_SIMPLE(&cm_args_0); if (cm_body_0) { _fx_free_LN15C_form__cstmt_t(&cm_body_0); } } else if (tag_0 == 24) { _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_11 = &s_0->u.CMacroUndef; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_45); fx_str_t slit_65 = FX_MAKE_STR("#undef "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_65, 0), _fx_catch_45); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_11->t0, &vcase_11->t1, 0), _fx_catch_45); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_45); _fx_catch_45: ; } else if (tag_0 == 25) { _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t cs_l_0 = 0; _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* vcase_12 = &s_0->u.CMacroIf; _fx_LN15C_form__cstmt_t else_l_0 = vcase_12->t1; int_ i_4 = 0; FX_COPY_PTR(vcase_12->t0, &cs_l_0); _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t lst_7 = cs_l_0; for (; lst_7; lst_7 = lst_7->tl, i_4 += 1) { _fx_N14C_form__cexp_t c_0 = 0; 
_fx_LN15C_form__cstmt_t sl_0 = 0; fx_str_t v_24 = {0}; _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* __pat___2 = &lst_7->hd; FX_COPY_PTR(__pat___2->t0, &c_0); FX_COPY_PTR(__pat___2->t1, &sl_0); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_47); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_47); if (i_4 == 0) { fx_str_t slit_66 = FX_MAKE_STR("#if "); fx_copy_str(&slit_66, &v_24); } else { fx_str_t slit_67 = FX_MAKE_STR("#elif "); fx_copy_str(&slit_67, &v_24); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_24, 0), _fx_catch_47); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, c_0, 0, 0), _fx_catch_47); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_47); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_47); _fx_LN15C_form__cstmt_t lst_8 = sl_0; for (; lst_8; lst_8 = lst_8->tl) { _fx_N15C_form__cstmt_t s_4 = lst_8->hd; FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_4, 0), _fx_catch_46); _fx_catch_46: ; FX_CHECK_EXN(_fx_catch_47); } _fx_catch_47: ; FX_FREE_STR(&v_24); if (sl_0) { _fx_free_LN15C_form__cstmt_t(&sl_0); } if (c_0) { _fx_free_N14C_form__cexp_t(&c_0); } FX_CHECK_EXN(_fx_catch_50); } if (else_l_0 != 0) { _fx_LN15C_form__cstmt_t else_l_1 = 0; FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_49); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_49); fx_str_t slit_68 = FX_MAKE_STR("#else"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_68, 0), _fx_catch_49); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_49); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_49); FX_COPY_PTR(else_l_0, &else_l_1); _fx_LN15C_form__cstmt_t lst_9 = else_l_1; for (; lst_9; lst_9 = lst_9->tl) { _fx_N15C_form__cstmt_t s_5 = lst_9->hd; FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_5, 0), _fx_catch_48); _fx_catch_48: ; FX_CHECK_EXN(_fx_catch_49); } _fx_catch_49: ; if (else_l_1) { _fx_free_LN15C_form__cstmt_t(&else_l_1); } } FX_CHECK_EXN(_fx_catch_50); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_50); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_50); fx_str_t slit_69 = FX_MAKE_STR("#endif"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_69, 0), _fx_catch_50); _fx_catch_50: ; if (cs_l_0) { _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&cs_l_0); } } else if (tag_0 == 26) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_51); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_51); fx_str_t slit_70 = FX_MAKE_STR("#include "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_70, 0), _fx_catch_51); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_0->u.CMacroInclude.t0, 0), _fx_catch_51); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_51); _fx_catch_51: ; } else if (tag_0 == 27) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_52); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_52); fx_str_t slit_71 = FX_MAKE_STR("#pragma "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_71, 0), _fx_catch_52); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_0->u.CMacroPragma.t0, 0), _fx_catch_52); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_52); _fx_catch_52: ; } else { FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); } _fx_cleanup: ; return fx_status; } static int _fx_M4C_ppFM16print_cascade_ifv5SN14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR5PP__t( fx_str_t* prefix_0, struct _fx_N14C_form__cexp_t_data_t* e_0, struct _fx_N15C_form__cstmt_t_data_t* s1_0, struct _fx_N15C_form__cstmt_t_data_t* s2_0, struct _fx_R5PP__t* pp_0, void* fx_fv) { fx_str_t prefix_1 = {0}; _fx_N14C_form__cexp_t e_1 = 0; _fx_N15C_form__cstmt_t s1_1 = 0; _fx_N15C_form__cstmt_t s2_1 = 0; int 
fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); fx_copy_str(prefix_0, &prefix_1); FX_COPY_PTR(e_0, &e_1); FX_COPY_PTR(s1_0, &s1_1); FX_COPY_PTR(s2_0, &s2_1); for (;;) { fx_str_t prefix_2 = {0}; _fx_N14C_form__cexp_t e_2 = 0; _fx_N15C_form__cstmt_t s1_2 = 0; _fx_N15C_form__cstmt_t s2_2 = 0; fx_str_t v_0 = {0}; fx_copy_str(&prefix_1, &prefix_2); FX_COPY_PTR(e_1, &e_2); FX_COPY_PTR(s1_1, &s1_2); FX_COPY_PTR(s2_1, &s2_2); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_3); fx_str_t slit_0 = FX_MAKE_STR(" ("); { const fx_str_t strs_0[] = { prefix_2, slit_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_0), _fx_catch_3); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_3); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_2, 0, 0), _fx_catch_3); fx_str_t slit_1 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_3); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_3); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, s1_2, 0), _fx_catch_3); int tag_0 = FX_REC_VARIANT_TAG(s2_2); bool res_0; if (tag_0 == 1) { res_0 = true; goto _fx_endmatch_0; } if (tag_0 == 7) { if (s2_2->u.CStmtBlock.t0 == 0) { res_0 = true; goto _fx_endmatch_0; } } res_0 = false; _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_3); if (res_0) { FX_BREAK(_fx_catch_0); _fx_catch_0: ; goto _fx_endmatch_1; } if (tag_0 == 9) { _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* vcase_0 = &s2_2->u.CStmtIf; FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_1); fx_str_t slit_2 = FX_MAKE_STR("else if"); FX_FREE_STR(&prefix_1); fx_copy_str(&slit_2, &prefix_1); _fx_N14C_form__cexp_t* e__0 = &vcase_0->t0; _fx_free_N14C_form__cexp_t(&e_1); FX_COPY_PTR(*e__0, &e_1); _fx_N15C_form__cstmt_t* s1__0 = &vcase_0->t1; _fx_free_N15C_form__cstmt_t(&s1_1); FX_COPY_PTR(*s1__0, &s1_1); _fx_N15C_form__cstmt_t* s2__0 = &vcase_0->t2; _fx_free_N15C_form__cstmt_t(&s2_1); FX_COPY_PTR(*s2__0, &s2_1); _fx_catch_1: ; goto _fx_endmatch_1; } FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_2); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_2); fx_str_t slit_3 = FX_MAKE_STR("else"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_3, 0), _fx_catch_2); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_2); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, s2_2, 0), _fx_catch_2); FX_BREAK(_fx_catch_2); _fx_catch_2: ; _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_3); _fx_catch_3: ; FX_FREE_STR(&v_0); if (s2_2) { _fx_free_N15C_form__cstmt_t(&s2_2); } if (s1_2) { _fx_free_N15C_form__cstmt_t(&s1_2); } if (e_2) { _fx_free_N14C_form__cexp_t(&e_2); } FX_FREE_STR(&prefix_2); FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_cleanup); } _fx_cleanup: ; FX_FREE_STR(&prefix_1); if (e_1) { _fx_free_N14C_form__cexp_t(&e_1); } if (s1_1) { _fx_free_N15C_form__cstmt_t(&s1_1); } if (s2_1) { _fx_free_N15C_form__cstmt_t(&s2_1); } return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t( struct _fx_LN15C_form__cstmt_t_data_t* code_0, fx_str_t* fx_result, void* fx_fv) { _fx_R5PP__t pp_0 = {0}; _fx_LS all_lines_0 = 0; int fx_status = 0; FX_CALL(_fx_M2PPFM21pprint_to_string_listRM1t2ii(128, 3, &pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM6beginvv2RM1ti(&pp_0, 0, 0), _fx_cleanup); int_ i_0 = 0; _fx_LN15C_form__cstmt_t lst_0 = code_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { _fx_N15C_form__cstmt_t s_0 = lst_0->hd; if (i_0 != 0) { FX_CALL(_fx_M2PPFM6break0v1RM1t(&pp_0, 0), _fx_catch_0); } 
FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(&pp_0, s_0, 0), _fx_catch_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_M2PPFM7newlinev1RM1t(&pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3endv1RM1t(&pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM5flushv1RM1t(&pp_0, 0), _fx_cleanup); _fx_FPLS0* f_0 = &pp_0.get_f; FX_CALL(f_0->fp(&all_lines_0, f_0->fcv), _fx_cleanup); fx_str_t slit_0 = FX_MAKE_STR(""); fx_str_t slit_1 = FX_MAKE_STR("\n"); fx_str_t slit_2 = FX_MAKE_STR("\n"); FX_CALL(_fx_F12join_embraceS4SSSLS(&slit_0, &slit_1, &slit_2, all_lines_0, fx_result, 0), _fx_cleanup); _fx_cleanup: ; _fx_free_R5PP__t(&pp_0); if (all_lines_0) { _fx_free_LS(&all_lines_0); } return fx_status; } FX_EXTERN_C int fx_init_C_pp(void) { int fx_status = 0; return fx_status; } FX_EXTERN_C void fx_deinit_C_pp(void) { }
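/* The generated statement printer above reduces to a chain of box operations
   on the PP pretty-printer: open a box, emit string fragments and
   sub-expressions, mark optional break points, close the box. A minimal
   hand-written sketch of the same call pattern, with hypothetical unmangled
   names (pp_begin/pp_str/pp_cut/pp_end stand in for the mangled calls such as
   _fx_M2PPFM5beginv1RM1t and _fx_M2PPFM3strv2RM1tS):

       pp_begin(pp);            // open a box; its contents wrap as a unit
       pp_str(pp, "while (");
       pp_cut(pp);              // zero-width break opportunity
       pp_cexp(pp, cond, 0);    // recurse into the expression printer
       pp_cut(pp);
       pp_str(pp, ");");
       pp_end(pp);              // close the box

   pprint_top_to_string then builds such a printer with line width 128 and
   indent 3, prints the top-level statements separated by break0, flushes,
   and joins the collected lines with "\n". */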
tinyexr.h
/* Copyright (c) 2014 - 2018, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... // #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // guess stdint.h is available(C99) #ifdef __cplusplus extern "C" { #endif // Use embedded miniz or not to decode ZIP format pixel. Linking with zlib // required if this flag is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ compression when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-5) #define TINYEXR_ERROR_CANT_OPEN_FILE (-6) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7) #define TINYEXR_ERROR_INVALID_HEADER (-8) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 int tiled; // tile format image int long_name; // long name attribute int non_image; // deep image(EXR 2.0) int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height in a tile. unsigned char **images; // image[channels][pixels] } EXRTile;
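// For reference, a tile's pixel data is laid out as images[channel][pixel]
// with `width` x `height` samples per channel. A minimal access sketch,
// assuming one channel `c` was decoded as TINYEXR_PIXELTYPE_FLOAT (`tile`
// and `c` are hypothetical variables):
//
//   const float *chan = (const float *)tile->images[c];
//   for (int y = 0; y < tile->height; y++) {
//     for (int x = 0; x < tile->width; x++) {
//       float v = chan[y * tile->width + x];  /* sample at (x, y) */
//     }
//   }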
typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; int data_window[4]; int display_window[4]; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format(`tiledesc`). int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; int non_image; int multipart; unsigned int header_len; // Custom attributes(excludes required attributes(e.g. `channels`, // `compression`, etc)) int num_custom_attributes; EXRAttribute *custom_attributes; // array of EXRAttribute. size = // `num_custom_attributes`. EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. int num_channels; int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Memory|File), then users // can edit it(only valid for HALF pixel type // channel) } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { to be removed. } // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel // alpha) or RGB(A) channels. // Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x height // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // height` // Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero // value. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Frees internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Frees internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Frees error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size);
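// A minimal usage sketch of the one-shot LoadEXR() loader declared above
// ("input.exr" is a placeholder path; error handling follows the comments
// on LoadEXR and FreeEXRErrorMessage):
//
//   float *rgba = NULL;
//   int w = 0, h = 0;
//   const char *err = NULL;
//   if (LoadEXR(&rgba, &w, &h, "input.exr", &err) != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   } else {
//     /* rgba holds w * h * 4 floats (RGBA); the application frees it. */
//     free(rgba);
//   }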
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from memory and initialize `EXRHeader`. // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*` // array. // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before calling this // function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from memory. // Application must set up `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must set up `EXRHeader*` array with `ParseEXRMultipartHeaderFromFile` before calling this // function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from memory. // Application must set up `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err);
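// The low-level loaders above are typically chained as follows (a sketch;
// "input.exr" is a placeholder and error paths are abbreviated):
//
//   EXRVersion version;
//   EXRHeader header;
//   EXRImage image;
//   const char *err = NULL;
//   InitEXRHeader(&header);
//   InitEXRImage(&image);
//   ParseEXRVersionFromFile(&version, "input.exr");
//   ParseEXRHeaderFromFile(&header, &version, "input.exr", &err);
//   /* Optionally request HALF channels to be widened to FLOAT on load: */
//   for (int i = 0; i < header.num_channels; i++) {
//     if (header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF)
//       header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
//   }
//   LoadEXRImageFromFile(&image, &header, "input.exr", &err);
//   /* image.images[c] (or image.tiles) now holds the pixels of channel c */
//   FreeEXRImage(&image);
//   FreeEXRHeader(&header);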
// Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to memory. // Image is compressed using EXRHeader.compression_type value. // Returns the number of bytes on success. // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error // When there is an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEFINED #define TINYEXR_IMPLEMENTATION_DEFINED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <sstream> #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #endif // __cplusplus > 199711L #ifdef _OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else // Issue #46. Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension.
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. 
The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's were designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archive file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using the mz_zip_writer_add_from_zip_reader() function (which copies the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h'" when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8
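// For reference, the checksum helpers above are seeded with their *_INIT
// values and may be fed data incrementally, e.g. (`buf` and `len` are
// hypothetical):
//
//   mz_ulong crc = MZ_CRC32_INIT;      /* == 0 */
//   crc = mz_crc32(crc, buf, len);     /* extend the CRC over a buffer */
//   mz_ulong a = mz_adler32(MZ_ADLER32_INIT, buf, len);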
#ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purposely differ from zlib's: // items/size is size_t, not unsigned long. typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflateInit(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy);
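// A single-call round-trip sketch using mz_compressBound(), mz_compress()
// and mz_uncompress() declared just below (`src`, `src_len` and `dst` are
// hypothetical; the caller must know an upper bound on the decompressed
// size):
//
//   mz_ulong comp_len = mz_compressBound(src_len);
//   unsigned char *comp = (unsigned char *)malloc(comp_len);
//   if (comp && mz_compress(comp, &comp_len, src, src_len) == MZ_OK) {
//     mz_ulong out_len = src_len;
//     mz_uncompress(dst, &out_len, comp, comp_len);
//   }
//   free(comp);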
Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. 
//   MZ_STREAM_END if all needed input has been consumed and all output bytes
//   have been written. For zlib streams, the adler-32 of the decompressed data
//   has also been verified.
//   MZ_STREAM_ERROR if the stream is bogus.
//   MZ_DATA_ERROR if the deflate stream is invalid.
//   MZ_PARAM_ERROR if one of the parameters is invalid.
//   MZ_BUF_ERROR if no forward progress is possible because the input buffer
//   is empty but the inflater needs more input to continue, or if the output
//   buffer is not large enough. Call mz_inflate() again with more input data,
//   or with more room in the output buffer (except when using single call
//   decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);

// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);

// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len);

// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
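// Usage sketch for the single-call API's above (illustrative only, excluded
// from the build with #if 0): compress a buffer with mz_compress2(), sizing
// the destination with mz_compressBound(), then inflate it back and compare.
// Assumes <stdlib.h>/<string.h> for malloc/free/memcmp; error handling is
// reduced to a boolean result, a real caller would also report mz_error().
#if 0
static int example_round_trip(const unsigned char *src, mz_ulong src_len) {
  mz_ulong comp_len = mz_compressBound(src_len); // in: capacity, out: actual
  unsigned char *comp = (unsigned char *)malloc(comp_len);
  unsigned char *back = (unsigned char *)malloc(src_len);
  mz_ulong back_len = src_len;                   // in/out, same convention
  int ok = 0;
  if (comp && back &&
      mz_compress2(comp, &comp_len, src, src_len, MZ_BEST_COMPRESSION) ==
          MZ_OK &&
      mz_uncompress(back, &back_len, comp, comp_len) == MZ_OK)
    ok = (back_len == src_len) && (memcmp(back, src, src_len) == 0);
  free(comp);
  free(back);
  return ok;
}
#endif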
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES

#endif // MINIZ_NO_ZLIB_APIS

// ------------------- Types and macros

typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;

#define MZ_FALSE (0)
#define MZ_TRUE (1)

// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif

// ------------------- ZIP archive reading/writing

#ifndef MINIZ_NO_ARCHIVE_APIS

enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};

typedef struct {
  mz_uint32 m_file_index;
  mz_uint32 m_central_dir_ofs;
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;
  mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
  time_t m_time;
#endif
  mz_uint32 m_crc32;
  mz_uint64 m_comp_size;
  mz_uint64 m_uncomp_size;
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs;
  mz_uint32 m_comment_size;
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);

struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;

typedef enum {
  MZ_ZIP_MODE_INVALID = 0,
  MZ_ZIP_MODE_READING = 1,
  MZ_ZIP_MODE_WRITING = 2,
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;

typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;
  mz_uint64 m_central_directory_file_ofs;
  mz_uint m_total_files;
  mz_zip_mode m_zip_mode;

  mz_uint m_file_offset_alignment;

  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;

  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;

  mz_zip_internal_state *m_pState;

} mz_zip_archive;

typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;

// ZIP archive reading

// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags);
#endif

// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);

// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat);

// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index);

// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
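// Reader usage sketch (illustrative only, disabled with #if 0; note that this
// build defines MINIZ_NO_ARCHIVE_APIS above, so the archive API's are compiled
// out here anyway). The mz_zip_archive struct must be zeroed before the first
// init call (memset needs <string.h>); the zip path below is hypothetical.
#if 0
static void example_list_and_extract(const char *zip_path) {
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip)); // required before any _init_*() call
  if (!mz_zip_reader_init_file(&zip, zip_path, 0)) return;
  mz_uint i, n = mz_zip_reader_get_num_files(&zip);
  for (i = 0; i < n; i++) {
    mz_zip_archive_file_stat st;
    if (!mz_zip_reader_file_stat(&zip, i, &st)) continue;
    if (mz_zip_reader_is_file_a_directory(&zip, i)) continue;
    size_t size = 0;
    void *data = mz_zip_reader_extract_to_heap(&zip, i, &size, 0);
    // ... use st.m_filename / data / size ...
    mz_free(data);            // heap blocks come from the MZ_MALLOC allocator
  }
  mz_zip_reader_end(&zip);    // frees all reader allocations
}
#endif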
// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow
// efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);

// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash, with an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment
// fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize);

// Ends archive writing, freeing all allocations, and closing the output file
// if mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
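// Writer usage sketch (illustrative only, disabled with #if 0): create an
// archive on disk, add one member from memory, then finalize and end. The
// archive and member names are hypothetical; memset needs <string.h>.
#if 0
static mz_bool example_write_zip(const void *blob, size_t blob_size) {
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip)); // required before any _init_*() call
  if (!mz_zip_writer_init_file(&zip, "results.zip", 0)) return MZ_FALSE;
  // Compression level may be OR'd with zero or more mz_zip_flags.
  mz_bool ok = mz_zip_writer_add_mem(&zip, "run/output.txt", blob, blob_size,
                                     MZ_BEST_SPEED);
  // The archive is only valid once finalized, and must be ended either way.
  ok = ok && mz_zip_writer_finalize_archive(&zip);
  return mz_zip_writer_end(&zip) && ok;
}
#endif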
// Misc. high-level helper functions:

// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);

#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer
// is at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
  TINFL_FLAG_HAS_MORE_INPUT = 2,
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
  TINFL_FLAG_COMPUTE_ADLER32 = 8
};

// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap
// block allocated via malloc().
// On entry:
//  pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
//  to decompress.
// On return:
//  Function returns a pointer to the decompressed data, or NULL on failure.
//  *pOut_len will be set to the decompressed data's size, which could be
//  larger than src_buf_len on uncompressible data.
//  The caller must call mz_free() on the returned block when it's no longer
//  needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags);

// tinfl_decompress_mem_to_mem() decompresses a block in memory to another
// block in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of
// bytes written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags);

// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called
// to flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;

// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768

// Return status.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,
  TINFL_STATUS_ADLER32_MISMATCH = -2,
  TINFL_STATUS_FAILED = -1,
  TINFL_STATUS_DONE = 0,
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,
  TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;

// Initializes the decompressor to its initial state.
#define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). 
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,
  TDEFL_COMPUTE_ADLER32 = 0x02000,
  TDEFL_GREEDY_PARSING_FLAG = 0x04000,
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
  TDEFL_RLE_MATCHES = 0x10000,
  TDEFL_FILTER_MATCHES = 0x20000,
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};

// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
//  pSrc_buf, src_buf_len: Pointer and size of source block to compress.
//  flags: The max match finder probes (default is 128) logically OR'd against
//  the above flags. Higher probes are slower but improve compression.
// On return:
//  Function returns a pointer to the compressed data, or NULL on failure.
//  *pOut_len will be set to the compressed data's size, which could be larger
//  than src_buf_len on uncompressible data.
//  The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);

// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);

// Compresses an image to a compressed PNG file in memory.
// On entry:
//  pImage, w, h, and num_chans describe the image to compress. num_chans may
//  be 1, 2, 3, or 4.
//  The image pitch in bytes per scanline will be w*num_chans. The leftmost
//  pixel on the top scanline is stored first in memory.
//  level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
//  MZ_BEST_COMPRESSION, etc.; MZ_DEFAULT_LEVEL is a decent default.
//  If flip is true, the image will be flipped on the Y axis (useful for OpenGL
//  apps).
// On return:
//  Function returns a pointer to the compressed data, or NULL on failure.
//  *pLen_out will be set to the size of the PNG image file.
//  The caller must mz_free() the returned heap block (which will typically be
//  larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);
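// Usage sketch for the PNG helper above (illustrative only, disabled with
// #if 0): encode a tightly packed RGB image to an in-memory PNG file. The
// pixel buffer is hypothetical; per the docs above, the returned block is
// heap-allocated and must be released with mz_free().
#if 0
static void *example_make_png(const unsigned char *rgb, int w, int h,
                              size_t *png_size) {
  // 3 channels = RGB, pitch is w * 3 bytes, top scanline stored first.
  return tdefl_write_image_to_png_file_in_memory(rgb, w, h, 3, png_size);
}
#endif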
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                          void *pUser);

// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any
// heap allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// tdefl_create_comp_flags_from_zip_params() can't be used if
// MINIZ_NO_ZLIB_APIS is defined, because it uses some of the zlib-style
// macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may
// be much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)

#ifndef MINIZ_HEADER_FILE_ONLY

typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

//#include <assert.h>
//#include <string.h>

#define MZ_ASSERT(x) assert(x)

#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif

#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p)                               \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |          \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p)                               \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |          \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) |  \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API's

mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}

// Karl Malbrain's compact CRC-32.
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status;
  }

  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}

int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}

mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}

typedef struct {
  tinfl_decompressor m_decomp;
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;

int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pDecomp;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;

  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                             sizeof(inflate_state));
  if (!pDecomp) return MZ_MEM_ERROR;

  pStream->state = (struct mz_internal_state *)pDecomp;

  tinfl_init(&pDecomp->m_decomp);
  pDecomp->m_dict_ofs = 0;
  pDecomp->m_dict_avail = 0;
  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pDecomp->m_first_call = 1;
  pDecomp->m_has_flushed = 0;
  pDecomp->m_window_bits = window_bits;

  return MZ_OK;
}

int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;

  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;

  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;

  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;

  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);

  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers
    // are large enough to hold the entire compressed/decompressed file.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;

    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // If flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;

  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }

  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;

    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;

    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);

    pState->m_dict_avail = (mz_uint)out_bytes;

    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary
                            // - oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }

  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ?
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
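// DEFLATE itself never emits code sizes longer than 15 bits (7 bits for the
// code-length alphabet); the larger limit defined just below is headroom for
// the Katajainen/Moffat pass above, whose raw lengths can exceed 15 for very
// skewed symbol distributions before tdefl_huffman_enforce_max_code_size()
// clamps them.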
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
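  // The code sizes just filled in are the fixed Huffman codes from RFC 1951
  // section 3.2.6: literal/length symbols 0-143 use 8 bits, 144-255 use 9,
  // 256-279 use 7, 280-287 use 8, and all 32 distance symbols use 5 bits.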
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
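  // If that ever happens, the fallback below re-encodes the same LZ codes as
  // a static-Huffman block instead.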
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
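  // It hashes the next three bytes (a "trigram") into a one-entry-per-bucket
  // table (d->m_hash) and accepts the first match found, rejecting only
  // minimum-length (3 byte) matches that lie 8KB or more back in the window.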
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
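  // No user buffers are bound yet; tdefl_compress() re-attaches the input and
  // output buffers on every call.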
d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded = succeeded &&
              (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
               TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}
typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len) return NULL;
  *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ?
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs were // #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif
defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, 
mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
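// (A hedged build note, not from the original sources: on 32-bit glibc
// targets the usual fix is to compile with -D_LARGEFILE64_SOURCE=1, and
// typically -D_FILE_OFFSET_BITS=64 as well, so that the stat64-based
// MZ_FILE_STAT mapping above is selected; the exact flags depend on your
// toolchain.)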
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
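// A minimal usage sketch of the lookup this sort accelerates (illustrative
// only; "archive.zip" and "docs/readme.txt" are placeholder names and error
// handling is omitted):
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     int idx = mz_zip_reader_locate_file(&zip, "docs/readme.txt", NULL, 0);
//     if (idx >= 0) { /* found; extract with the helpers defined below */ }
//     mz_zip_reader_end(&zip);
//   }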
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity check - reject archives which are too small to even hold an // end of central directory record. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record.
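// For reference, the fixed 22-byte record parsed below is laid out like this
// (all fields little-endian, matching the MZ_ZIP_ECDH_* offsets above):
//   ofs 0:  mz_uint32 signature, 0x06054b50 ("PK\x05\x06" on disk)
//   ofs 4:  mz_uint16 number of this disk
//   ofs 6:  mz_uint16 disk on which the central directory starts
//   ofs 8:  mz_uint16 central directory entries on this disk
//   ofs 10: mz_uint16 total central directory entries
//   ofs 12: mz_uint32 central directory size in bytes
//   ofs 16: mz_uint32 central directory offset from the start of the archive
//   ofs 20: mz_uint16 archive comment length (comment bytes follow)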
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
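// (Background on the 0xFFFFFFFF checks below: zip64 archives store the
// sentinel 0xFFFFFFFF in any 32-bit size field that overflowed and put the
// real 64-bit value in an extra field, so e.g. a 5 GiB member would report
// comp_size == 0xFFFFFFFF here, which is why such entries are rejected.)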
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
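// (Example: a folder entry written by a typical Windows archiver carries
// external attributes with the DOS FILE_ATTRIBUTE_DIRECTORY bit 0x10 set,
// which the test below catches even when the stored name has no trailing '/'.)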
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
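// A sketch of what pCallback typically looks like (illustrative only; this
// mirrors the FILE*-backed mz_zip_file_write_callback defined further below,
// and my_write_cb is a placeholder name):
//
//   static size_t my_write_cb(void *pOpaque, mz_uint64 ofs, const void *pBuf,
//                             size_t n) {
//     (void)ofs; /* offsets arrive in increasing order, so a stream works */
//     return fwrite(pBuf, 1, n, (FILE *)pOpaque);
//   }
//   /* ... mz_zip_reader_extract_to_callback(pZip, idx, my_write_cb, fp, 0); */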
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
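// (file_crc32 was accumulated above with calls of the form
// crc = mz_crc32(crc, ptr, len); chaining the running value this way is
// equivalent to hashing the whole decompressed stream in one call, so the
// comparison below against the central directory CRC is exact.)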
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
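// (Standard bit trick: for x > 0, x is a power of two iff (x & (x - 1)) == 0;
// e.g. 8 & 7 == 0, while 12 & 11 == 8, so 12 is rejected.)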
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the supported // max size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO (void)pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
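// (Only m_central_dir can be left partially grown at this point: a failed
// push_back never changes an array's size, so shrinking m_central_dir back to
// orig_central_dir_size undoes the partial append.)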
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
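// The file is opened with the reader first (central directory deliberately
// left unsorted so it can be rewritten verbatim) and then converted into a
// writer, so the new entry lands after the existing archive data.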
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef _MSC_VER #pragma warning(pop) #endif } // namespace miniz #else // Reuse MINIZ_LITTLE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
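// Bit-layout refresher (illustrative): an IEEE half is 1 sign / 5 exponent /
// 10 mantissa bits and a float is 1 / 8 / 23. For example, 1.0 is 0x3C00 as a
// half (exponent field 15) and 0x3F800000 as a float (exponent field 127);
// half_to_float() below therefore rebiases by (127 - 15) << 23 and uses the
// magic constant 113 << 23 to renormalize denormals.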
union FP32 { unsigned int u; float f; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf } else // Normalized number { // Exponent unbias the single, then bias the half int newexp = f.s.Exponent - 127 + 15; if (newexp >= 31) // Overflow, return signed infinity o.s.Exponent = 31; else if (newexp <= 0) // Underflow { if ((14 - newexp) <= 24) // Mantissa might be non-zero { unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit o.s.Mantissa = mant >> (14 - newexp); if ((mant >> (13 - newexp)) & 1) // Check for rounding o.u++; // Round, might overflow into exp bit, but this is OK } } else { o.s.Exponent = static_cast<unsigned int>(newexp); o.s.Mantissa = f.s.Mantissa >> 13; if (f.s.Mantissa & 0x1000) // Check for rounding o.u++; // Round, might overflow to inf, this is OK } } o.s.Sign = f.s.Sign; return o; } // NOTE: From OpenEXR code // #define IMF_INCREASING_Y 0 // #define IMF_DECREASING_Y 1 // #define IMF_RANDOM_Y 2 // // #define IMF_NO_COMPRESSION 0 // #define IMF_RLE_COMPRESSION 1 // #define IMF_ZIPS_COMPRESSION 2 // #define IMF_ZIP_COMPRESSION 3 // #define IMF_PIZ_COMPRESSION 4 // #define IMF_PXR24_COMPRESSION 5 // #define IMF_B44_COMPRESSION 6 // #define IMF_B44A_COMPRESSION 7 #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif static const char *ReadString(std::string *s, const char *ptr, size_t len) { // Read until NULL(\0). const char *p = ptr; const char *q = ptr; while ((size_t(q - ptr) < len) && (*q) != 0) { q++; } if (size_t(q - ptr) >= len) { (*s) = std::string(); return NULL; } (*s) = std::string(p, q); return q + 1; // skip '\0' } static bool ReadAttribute(std::string *name, std::string *type, std::vector<unsigned char> *data, size_t *marker_size, const char *marker, size_t size) { size_t name_len = strnlen(marker, size); if (name_len == size) { // String does not have a terminating character.
return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). 
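// (CompressZip() stores a block verbatim when deflate would expand it, so
// equal sizes signal raw storage and the block is copied through unchanged.)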
memcpy(dst, src, src_size); return true; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (miniz::MZ_OK != ret) { return false; } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { return false; } #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size); while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (*uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + (*uncompressed_size); for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } // RLE code from OpenEXR -------------------------------------- #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wsign-conversion" #endif #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif const int MIN_RUN_LENGTH = 3; const int MAX_RUN_LENGTH = 127; // // Compress an array of bytes, using run-length encoding, // and return the length of the compressed data. // static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressible run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressible run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the uncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength.
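// Illustration of the encoded stream (assuming the rleCompress() format
// above): a run of four 'a' bytes is written as {3, 'a'} (count - 1, then the
// repeated byte), while the non-run "abc" is written as {-3, 'a', 'b', 'c'}
// (negative literal count, then the literal bytes).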
// static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; if (0 > (maxLength -= count)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static void DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); assert(ret == static_cast<int>(uncompressed_size)); (void)ret; // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. 
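// (Illustrative: with an uncompressed size of 5, t1 walks tmpBuf bytes
// {0, 1, 2} and t2 walks bytes {3, 4}, and the loop below interleaves them
// into dst as {t1[0], t2[0], t1[1], t2[1], t1[2]}, undoing the even/odd
// split performed by CompressRle().)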
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
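// (Worked example, for illustration: wenc16(5, 3) computes ao = (5 + 32768)
// & 0xFFFF = 32773, l = (32773 + 3) >> 1 = 16388 and h = 32770; wdec16(16388,
// 32770) then recovers b = (16388 - 16385) & 0xFFFF = 3 and a = (32770 + 3 -
// 32768) & 0xFFFF = 5.)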
// const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? ny : nx; int p = 1; // == 1 << level int p2 = 2; // == 1 << (level+1) // // Hierarchical loop on smaller dimension n // while (p2 <= n) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet encoding // if (w14) { wenc14(*px, *p01, i00, i01); wenc14(*p10, *p11, i10, i11); wenc14(i00, i10, *px, *p10); wenc14(i01, i11, *p01, *p11); } else { wenc16(*px, *p01, i00, i01); wenc16(*p10, *p11, i10, i11); wenc16(i00, i10, *px, *p10); wenc16(i01, i11, *p01, *p11); } } // // Encode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wenc14(*px, *p10, i00, *p10); else wenc16(*px, *p10, i00, *p10); *px = i00; } } // // Encode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wenc14(*px, *p01, i00, *p01); else wenc16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p = p2; p2 <<= 1; } } // // 2D Wavelet decoding: // static void wav2Decode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ?
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
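// Size arithmetic, for orientation: HUF_ENCSIZE is (1 << 16) + 1 = 65537 so
// that hufBuildEncTable() can append one pseudo-symbol used for run-length
// encoding, and HUF_DECSIZE is 1 << 14 = 16384 primary decoding slots; codes
// longer than HUF_DECBITS spill into the per-slot secondary 'p' lists.
#if 0
// Illustrative round-trip sketch (never compiled): hufCompress() and
// hufUncompress() are defined further below; the 20-byte header they exchange
// is {im, iM, tableLength, nBits, reserved}, each a little-endian uint32.
// The buffer size here is just a generous guess for this toy input.
static void hufRoundTripExample() {
  unsigned short raw[4] = {5, 5, 5, 7};
  std::vector<char> compressed(1 << 16);  // ample scratch space for 4 symbols
  int nCompressed = hufCompress(raw, 4, compressed.data());
  std::vector<unsigned short> decoded(4);  // must be pre-sized to the
                                           // expected output count
  hufUncompress(compressed.data(), nCompressed, &decoded);
  // decoded now holds {5, 5, 5, 7} again.
}
#endif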
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
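// (Hypothetical example: for input data {5, 5, 7}, frq[5] == 2, frq[7] == 1
// and all other entries are zero, so the loop below yields im == 5 and
// iM == 7 before the run-length pseudo-symbol is appended at iM + 1.)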
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode > ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
// static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller) // decoding table [HUF_DECSIZE] { for (int i = 0; i < HUF_DECSIZE; i++) { hdecod[i].len = 0; hdecod[i].lit = 0; hdecod[i].p = NULL; } // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE); } // // Build a decoding hash table based on the encoding table hcode: // - short codes (<= HUF_DECBITS) are resolved with a single table access; // - long code entry allocations are not optimized, because long codes are // infrequent; // - decoding tables are used by hufDecode(); // static bool hufBuildDecTable(const long long *hcode, // i : encoding table int im, // i : min index in hcode int iM, // i : max index in hcode HufDec *hdecod) // o: (allocated by caller) // decoding table [HUF_DECSIZE] { // // Init hashtable & loop on all codes. // Assumes that hufClearDecTable(hdecod) has already been called. // for (; im <= iM; im++) { long long c = hufCode(hcode[im]); int l = hufLength(hcode[im]); if (c >> l) { // // Error: c is supposed to be an l-bit code, // but c contains a value that is greater // than the largest l-bit number. // // invalidTableEntry(); return false; } if (l > HUF_DECBITS) { // // Long code: add a secondary entry // HufDec *pl = hdecod + (c >> (l - HUF_DECBITS)); if (pl->len) { // // Error: a short code has already // been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->lit++; if (pl->p) { int *p = pl->p; pl->p = new int[pl->lit]; for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; delete[] p; } else { pl->p = new int[1]; } pl->p[pl->lit - 1] = im; } else if (l) { // // Short code: init all primary entries // HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) { // // Error: a short code or a long code has // already been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->len = l; pl->lit = im; } } } return true; } // // Free the long code entries of a decoding table built by hufBuildDecTable() // static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table { for (int i = 0; i < HUF_DECSIZE; i++) { if (hdecod[i].p) { delete[] hdecod[i].p; hdecod[i].p = 0; } } } // // ENCODING // inline void outputCode(long long code, long long &c, int &lc, char *&out) { outputBits(hufLength(code), hufCode(code), c, lc, out); } inline void sendCode(long long sCode, int runCount, long long runCode, long long &c, int &lc, char *&out) { // // Output a run of runCount instances of the symbol sCode. // Output the symbols explicitly, or if that is shorter, output // the sCode symbol once followed by a runCode symbol and runCount // expressed as an 8-bit number.
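// (Cost check, illustrative: if both sCode and runCode are 6-bit codes, the
// run form costs 6 + 6 + 8 = 20 bits, and the test below first prefers it at
// runCount == 4, since 20 < 6 * 4.)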
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety if ((out - 1) <= ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // 
invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { // empty input is valid only when the expected output is empty, too
if (raw->size() != 0) return false; return true; } // reject input too small to hold the 20-byte header (im, iM, tableLength, nBits, reserved)
if (nCompressed < 20) { return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...)
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. 
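//
// The steps below follow the PIZ scheme: build a bitmap of the 16-bit
// values that actually occur, replace each value by its rank through a
// forward LUT (range compression), wavelet-encode each channel, and
// Huffman-encode the result.  For example, if only the values
// {0, 3, 9} occur, the forward LUT maps 0 -> 0, 3 -> 1, 9 -> 2 and
// maxValue becomes 2; the decoder applies the reverse LUT to map the
// ranks back.  The output written here is: minNonZero (2 bytes),
// maxNonZero (2 bytes), the used slice of the bitmap, a 4-byte
// Huffman data length, then the Huffman data itself.
//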
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if 
((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->precision = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else { assert(0); } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, int num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = dst_width * dst_num_lines * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return true; } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((dst_width & 3U) || (dst_num_lines & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, dst_width, dst_num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = dst_width * dst_num_lines; for (int c = 0; c < num_channels; c++) { // decompress 4x4 pixel block. for (int y = 0; y < dst_num_lines; y += 4) { for (int x = 0; x < dst_width; x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * dst_width + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((width & 3U) || (num_lines & 3U)) { return false; } // create input array.
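// All channels are stacked vertically into a single 2-D field of
// width x (num_lines * num_channels) values, so one zfp stream covers
// the whole image.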
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
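// (A plain assignment may be compiled into an aligned wide load at
// higher optimization levels, and line_ptr is not guaranteed to be
// suitably aligned here, hence the byte-wise copy.)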
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is not enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ...
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
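// (width * num_lines * pixel_data_size bytes; for a well-formed file
// the decompressed scanline block is exactly this size.)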
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if 
(line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } if (reinterpret_cast<const unsigned char *>(line_ptr + 
width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aligned. use byte-wise copy for safety. #76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupted data? return false; } unsigned int val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } return true; } static void DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { assert(tile_offset_x * tile_size_x < data_width); assert(tile_offset_y * tile_size_y < data_height); // Compute actual image size in a tile. if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size.
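// Note that the stride passed below stays at the nominal tile_size_x
// even for edge tiles, where (*width) and (*height) are clamped, so
// decoded pixels are written with the full tile pitch.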
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
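// (A multi-part header list is terminated by a single null byte in
// place of the next header's first attribute name.)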
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return 
TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." 
<< std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
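// (strncpy() leaves the destination unterminated when the source is
// 255 bytes or longer, so the last byte is forced to '\0' below.)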
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy poiner exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile. 
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else { int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; // assert(num_lines > 0); if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; 
data_ptr += 8; // Adjust line_no with data_window.bmin.y line_no -= exr_header->data_window[1]; if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0]; if (data_width >= std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data window value", err); return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data window or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } } return ret; } } } // namespace tinyexr int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. 
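// (LoadEXR() exposes only FP32 RGBA output, so the loop below requests each
// HALF channel as FLOAT and lets the decoder do the half-to-float
// conversion.)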
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) { // Alpha channel only. if (exr_header.tiled) { // todo.implement this } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. `memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Failed to parse EXR version", err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return 
TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
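// The blocks below write the required header attributes in order: channels,
// compression, dataWindow, displayWindow, lineOrder, pixelAspectRatio,
// screenWindowCenter and screenWindowWidth, followed by any custom
// attributes and the terminating null byte that ends the header.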
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += 
sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( 
static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 1024 + static_cast<unsigned int>( 1.2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char 
**err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 8, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if 
(exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. File may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header.
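// Version word layout as checked below: byte 0 must be 2 (EXR 2.0); byte 1
// carries the flag bits 0x02 = tiled, 0x04 = long attribute names,
// 0x08 = non-image (deep data) and 0x10 = multipart.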
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // unreachable: `version` was checked above } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In a multipart image, there is a 'part number' before each chunk's data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // The EXR spec says 'part number' is 'unsigned long', but it is actually // 'unsigned int' (4 bytes) in the OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check that the 'part number' is identical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename) { if ((components == 1) || components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most EXR viewers expect this channel order. if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. // no precision reduction) } } const char *err = NULL; int ret = SaveEXRImageToFile(&image, &header, outfilename, &err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-pointer-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEFINED #endif // TINYEXR_IMPLEMENTATION
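// ---------------------------------------------------------------------------
// Usage sketch (not part of the library): a minimal round-trip through the
// public entry points defined above. `input.exr` and `output.exr` are
// placeholder file names; error handling follows the `const char **err`
// convention of LoadEXR()/FreeEXRErrorMessage(). Note that SaveEXR() rejects
// images smaller than 16x16 pixels.
//
//   #define TINYEXR_IMPLEMENTATION
//   #include "tinyexr.h"
//   #include <stdio.h>
//   #include <stdlib.h>
//
//   int main(void) {
//     float *rgba = NULL; /* LoadEXR() mallocs width * height * 4 floats */
//     int width = 0, height = 0;
//     const char *err = NULL;
//     if (LoadEXR(&rgba, &width, &height, "input.exr", &err) !=
//         TINYEXR_SUCCESS) {
//       if (err) {
//         fprintf(stderr, "EXR load error: %s\n", err);
//         FreeEXRErrorMessage(err);
//       }
//       return EXIT_FAILURE;
//     }
//     /* Save the pixels back out: 4 components, save_as_fp16 = 1 (HALF). */
//     int ret = SaveEXR(rgba, width, height, 4, 1, "output.exr");
//     free(rgba);
//     return (ret == TINYEXR_SUCCESS) ? EXIT_SUCCESS : EXIT_FAILURE;
//   }
// ---------------------------------------------------------------------------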
ast-dump-openmp-cancellation-point.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp parallel { #pragma omp cancellation point parallel } } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-cancellation-point.c:3:1, line:8:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:8:1> // CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:7:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:7:3> // CHECK-NEXT: | `-OMPCancellationPointDirective {{.*}} <line:6:1, col:40> openmp_standalone_directive // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-cancellation-point.c:4:1) *const restrict'
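// Illustrative sketch (not part of the test): a cancellation point only has an
// effect after some thread activates cancellation with "#pragma omp cancel"
// for the same construct kind, and, at run time, only when the
// OMP_CANCELLATION environment variable enables cancellation. A minimal
// sketch of the usual pairing; the helper functions are hypothetical.
//
// int do_work(void);    /* hypothetical; returns nonzero on failure */
// void more_work(void); /* hypothetical */
//
// void worker(void) {
// #pragma omp parallel
//   {
//     if (do_work() != 0) {
// #pragma omp cancel parallel /* request cancellation of the parallel region */
//     }
// #pragma omp cancellation point parallel /* threads may leave the region here */
//     more_work(); /* skipped by threads that observe the cancellation */
//   }
// }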
GB_unaryop__ainv_uint8_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint8_int8 // op(A') function: GB_tran__ainv_uint8_int8 // C type: uint8_t // A type: int8_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint8_t z = (uint8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint8_int8 ( uint8_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
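//------------------------------------------------------------------------------
// Illustrative sketch (not generated code): what the Cx = op (cast (Ax)) loop
// in GB_unop__ainv_uint8_int8 above expands to once the GB_GETA, GB_CASTING,
// and GB_OP macros are substituted. Negating after the cast to uint8_t wraps
// modulo 256, per C's unsigned arithmetic rules. The function name is
// hypothetical.
//------------------------------------------------------------------------------
// #include <stdint.h>
//
// static void unop_ainv_uint8_int8_expanded (uint8_t *Cx, const int8_t *Ax,
//                                            int64_t anz)
// {
//     for (int64_t p = 0 ; p < anz ; p++)
//     {
//         int8_t aij = Ax [p] ;         // GB_GETA (aij, Ax, p)
//         uint8_t x = (uint8_t) aij ;   // GB_CASTING (x, aij)
//         Cx [p] = -x ;                 // GB_OP (GB_CX (p), x)
//     }
// }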
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class 
DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. 
SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See the comment in the overload below for why it's safe to compute the // linkage of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we would otherwise have to do, since we can't directly use the /// llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When an AlignPackInfo itself cannot be used, this returns a 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding; it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attribute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is an XL #pragma align/pack stack.
bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x0000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label, pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method() {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encountered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to look up file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. 
We produce a warning for these when popping the context if /// they are neither discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed.
Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // arguments. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedButUsed - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons.
Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case anyway. ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template <typename T> friend const ImmediateDiagBuilder & operator<<(const ImmediateDiagBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const ImmediateDiagBuilder &operator<<(T &&V) const { const DiagnosticBuilder &BaseDiag = *this; BaseDiag << std::move(V); return *this; } }; /// A generic diagnostic builder for errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class SemaDiagnosticBuilder { public: enum Kind { /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                          FunctionDecl *Fn, Sema &S);
    SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
    ~SemaDiagnosticBuilder();

    bool isImmediate() const { return ImmediateDiag.hasValue(); }

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (SemaDiagnosticBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
    /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
    operator bool() const { return isImmediate(); }

    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const SemaDiagnosticBuilder &operator<<(T &&V) const {
      if (ImmediateDiag.hasValue())
        *ImmediateDiag << std::move(V);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
      return *this;
    }

    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
      if (Diag.ImmediateDiag.hasValue())
        PD.Emit(*Diag.ImmediateDiag);
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
      return Diag;
    }

    void AddFixItHint(const FixItHint &Hint) const {
      if (ImmediateDiag.hasValue())
        ImmediateDiag->AddFixItHint(Hint);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
    }

    friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
      return ExprError();
    }
    friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
      return StmtError();
    }
    operator ExprResult() const { return ExprError(); }
    operator StmtResult() const { return StmtError(); }
    operator TypeResult() const { return TypeError(); }
    operator DeclResult() const { return DeclResult(true); }
    operator MemInitResult() const { return MemInitResult(true); }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Is the last error level diagnostic immediate. This is used to determine
  /// whether the next info diagnostic should be immediate.
  bool IsLastErrorImmediate = true;

  /// Emit a diagnostic.
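  ///
  /// Streaming usage, sketched with a placeholder diagnostic ID (any real ID
  /// from the diagnostic .td files would be used in practice):
  /// \code
  ///   Diag(Loc, diag::err_example) << SomeArg; // diag::err_example is a
  ///                                            // hypothetical ID
  /// \endcode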
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                             bool DeferHint = false);

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                             bool DeferHint = false);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  /// Whether an uncompilable error has occurred. This includes errors that
  /// happen in deferred diagnostics.
  bool hasUncompilableErrorOccurred() const;

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  /// Invent a new identifier for parameters of abbreviated templates.
  IdentifierInfo *
  InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                             unsigned Index);

  void emitAndClearUnusedLocalTypedefWarnings();

private:
  /// Function or variable declarations to be checked for whether the deferred
  /// diagnostics should be emitted.
  SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
  // Emit all deferred diagnostics.
  void emitDeferredDiags();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,
    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K,
                               unsigned OpenMPCaptureLevel = 0);

  /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
  /// time after they've been popped.
  class PoppedFunctionScopeDeleter {
    Sema *Self;

  public:
    explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
    void operator()(sema::FunctionScopeInfo *Scope) const;
  };

  using PoppedFunctionScopePtr =
      std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

  PoppedFunctionScopePtr
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       QualType BlockType = QualType());

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.empty() ?
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. 
This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. 
Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. 
Expressions are checked bottom up as
  /// they are parsed, meaning that a noderef pointer may not be accessed. For
  /// example, in `&*p` where `p` is a noderef pointer, we will first parse the
  /// `*p`, but need to check that `address of` is called on it. This requires
  /// keeping a container of all pending expressions and checking whether their
  /// address is eventually taken.
  void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
  void CheckAddressOfNoDeref(const Expr *E);
  void CheckMemberAccessOfNoDeref(const MemberExpr *E);

  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);

  struct ModuleScope {
    SourceLocation BeginLoc;
    clang::Module *Module = nullptr;
    bool ModuleInterface = false;
    bool ImplicitGlobalModuleFragment = false;
    VisibleModuleSet OuterVisibleModules;
  };
  /// The modules we're currently parsing.
  llvm::SmallVector<ModuleScope, 16> ModuleScopes;

  /// Namespace definitions that we will export when they finish.
  llvm::SmallPtrSet<const NamespaceDecl *, 8> DeferredExportedNamespaces;

  /// Get the module whose scope we are currently within.
  Module *getCurrentModule() const {
    return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
  }

  VisibleModuleSet VisibleModules;

public:
  /// Get the module owning an entity.
  Module *getOwningModule(const Decl *Entity) {
    return Entity->getOwningModule();
  }

  /// Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND);

  bool isModuleVisible(const Module *M, bool ModulePrivate = false);

  // When loading a non-modular PCH file, this is used to restore module
  // visibility.
  void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
    VisibleModules.setVisible(Mod, ImportLoc);
  }

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return D->isUnconditionallyVisible() || isVisibleSlow(D);
  }

  /// Determine whether any declaration of an entity is visible.
  bool
  hasVisibleDeclaration(const NamedDecl *D,
                        llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
    return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
  }
  bool hasVisibleDeclarationSlow(const NamedDecl *D,
                                 llvm::SmallVectorImpl<Module *> *Modules);

  bool hasVisibleMergedDefinition(NamedDecl *Def);
  bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

  /// Determine if \p D and \p Suggested have a structurally compatible
  /// layout as described in C11 6.2.7/1.
  bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl *>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool
  hasVisibleDefaultArgument(const NamedDecl *D,
                            llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if there is a visible declaration of \p D that is an explicit
  /// specialization declaration for a specialization of a template. (For a
  /// member specialization, use hasVisibleMemberSpecialization.)
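  ///
  /// Hedged sketch of combining it with the missing-import diagnostics below
  /// (Spec is an assumed specialization declaration):
  /// \code
  ///   llvm::SmallVector<Module *, 4> Mods;
  ///   if (!hasVisibleExplicitSpecialization(Spec, &Mods))
  ///     diagnoseMissingImport(Loc, Spec, Spec->getLocation(), Mods,
  ///                           MissingImportKind::ExplicitSpecialization,
  ///                           /*Recover=*/true);
  /// \endcode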
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. 
NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. 
/// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
    Diagnose,
    /// Identify whether this function satisfies the formal rules for
    /// constexpr functions in the current language mode (with no extensions).
    CheckValid
  };

  bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                        CheckConstexprKind Kind);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(
      CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  void NoteHiddenVirtualMethods(
      CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  // Returns true if the function declaration is a redeclaration
  bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                                LookupResult &Previous,
                                bool IsMemberSpecialization);
  bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
  bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                      QualType NewT, QualType OldT);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                   bool IsDefinition);
  void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
                                         Expr *DefaultArg,
                                         SourceLocation EqualLoc);
  void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  // Contexts where using non-trivial C union types can be disallowed. This is
  // passed to err_non_trivial_c_union_in_invalid_context.
  enum NonTrivialCUnionContext {
    // Function parameter.
    NTCUC_FunctionParam,
    // Function return.
    NTCUC_FunctionReturn,
    // Default-initialized object.
    NTCUC_DefaultInitializedObject,
    // Variable with automatic storage duration.
    NTCUC_AutoVar,
    // Initializer expression that might copy from another object.
    NTCUC_CopyInit,
    // Assignment.
    NTCUC_Assignment,
    // Compound literal.
    NTCUC_CompoundLiteral,
    // Block capture.
    NTCUC_BlockCapture,
    // lvalue-to-rvalue conversion of volatile type.
    NTCUC_LValueToRValueVolatile,
  };

  /// Emit diagnostics if the initializer or any of its explicit or
  /// implicitly-generated subexpressions require copying or
  /// default-initializing a type that is or contains a C union type that is
  /// non-trivial to copy or default-initialize.
  void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);

  // These flags are passed to checkNonTrivialCUnion.
  enum NonTrivialCUnionKind {
    NTCUK_Init = 0x1,
    NTCUK_Destruct = 0x2,
    NTCUK_Copy = 0x4,
  };

  /// Emit diagnostics if a non-trivial C union type or a struct that contains
  /// a non-trivial C union is used in an invalid context.
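  ///
  /// Call-site sketch (QT and Loc are assumed to be in scope; the kind flags
  /// above combine bitwise):
  /// \code
  ///   checkNonTrivialCUnion(QT, Loc, NTCUC_FunctionParam,
  ///                         NTCUK_Init | NTCUK_Destruct | NTCUK_Copy);
  /// \endcode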
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
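  ///
  /// Sketch of a call site (FD is an assumed FunctionDecl*):
  /// \code
  ///   DiagnoseUnusedParameters(FD->parameters());
  /// \endcode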
  void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

  /// Diagnose whether the size of parameters or return value of a
  /// function or obj-c method definition is pass-by-value and larger than a
  /// specified threshold.
  void
  DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                         QualType ReturnTy, NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                              SourceLocation SemiLoc);

  enum class ModuleDeclKind {
    Interface,      ///< 'export module X;'
    Implementation, ///< 'module X;'
  };

  /// The parser has processed a module-declaration that begins the definition
  /// of a module interface or implementation.
  DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                                 SourceLocation ModuleLoc, ModuleDeclKind MDK,
                                 ModuleIdPath Path, bool IsFirstDecl);

  /// The parser has processed a global-module-fragment declaration that begins
  /// the definition of the global module fragment of the current module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

  /// The parser has processed a private-module-fragment declaration that
  /// begins the definition of the private module fragment of the current
  /// module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  /// \param PrivateLoc The location of the 'private' keyword.
  DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                                SourceLocation PrivateLoc);

  /// The parser has processed a module import declaration.
  ///
  /// \param StartLoc The location of the first token in the declaration. This
  ///        could be the location of an '@', 'export', or 'import'.
  /// \param ExportLoc The location of the 'export' keyword, if any.
  /// \param ImportLoc The location of the 'import' keyword.
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
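  ///
  /// Sketch (ND is an assumed NamedDecl* that lookup found but that is not
  /// visible from the current module):
  /// \code
  ///   diagnoseMissingImport(UseLoc, ND, MissingImportKind::Declaration,
  ///                         /*Recover=*/true);
  /// \endcode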
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
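  ///
  /// Typical use in a diagnostic, loosely sketched (D and TTK assumed in
  /// scope; the diagnostic ID comes from the "keep in sync" note above, and
  /// the full argument list may differ in practice):
  /// \code
  ///   Diag(Loc, diag::err_tag_reference_non_tag)
  ///       << getNonTagTypeDeclKind(D, TTK);
  /// \endcode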
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
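  ///
  /// Query sketch (FD is an assumed, possibly defaulted FunctionDecl*):
  /// \code
  ///   if (DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD))
  ///     if (DFK.isComparison())
  ///       Kind = DFK.asComparison(); // Kind: an assumed local variable
  /// \endcode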
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
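  ///
  /// Sketch (CurScope and Ctx assumed in scope):
  /// \code
  ///   if (Scope *Enclosing = getScopeForDeclContext(CurScope, Ctx))
  ///     PushOnScopeChains(ND, Enclosing); // ND: an assumed NamedDecl*
  /// \endcode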
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,
  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,
  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,
  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
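// (Illustrative note, not from the upstream header:) the Priority argument
// passed to mergeAvailabilityAttr below is a sum of the AvailabilityPriority
// weights above. For example, a watchOS availability attribute inferred from
// an iOS attribute that was itself applied via '#pragma clang attribute'
// arrives with priority AP_PragmaClangAttribute +
// AP_InferredFromOtherPlatform == 3, so an explicitly written attribute
// (AP_Explicit == 0) takes precedence when the two are merged.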
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, 
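// (Illustrative: initializing 'int *p = f();' from an incompatible return
// type is diagnosed with AA_Initializing, while a mismatched call argument
// uses AA_Passing.)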
AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier 
*Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. 
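/// (For instance, 'int' or an unscoped enumeration always matches; a scoped
/// enumeration such as 'enum class E : int' matches only when
/// AllowScopedEnumerations is true.)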
bool match(QualType T) override;

SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                      QualType T) override {
  return diagnoseNotInt(S, Loc, T);
}

/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

using ADLCallKind = CallExpr::ADLCallKind;

void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(
    const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
    bool SuppressUserConversions = false, bool PartialOverloading = false,
    bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet &CandidateSet,
                        bool SuppressUserConversion = false,
                        OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet &CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None,
                        OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(
    FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext,
    TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType,
    Expr::Classification ObjectClassification, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
    OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
    FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
    ArrayRef<Expr *>
Args, OverloadCandidateSet &CandidateSet,
    ConversionSequenceList &Conversions, bool SuppressUserConversions,
    CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
    Expr::Classification ObjectClassification = {},
    OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
    CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto, Expr *Object,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet);
void AddNonMemberOperatorCandidates(
    const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                         OverloadCandidateSet &CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet);
void AddArgumentDependentLookupCandidates(
    DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args,
    TemplateArgumentListInfo *ExplicitTemplateArgs,
    OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate.
void NoteOverloadCandidate(
    NamedDecl *Found, FunctionDecl *Fn,
    OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
    QualType DestType = QualType(), bool TakingAddress = false);

// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                               bool TakingAddress = false);

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
                            ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);

/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                         const Expr *ThisArg,
                                         ArrayRef<const Expr *> Args,
                                         SourceLocation Loc);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
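/// For example (illustrative), a constant-condition attribute is
/// argument-independent and fires on any use of the function:
/// \code
///   void legacy_api(void)
///       __attribute__((diagnose_if(1, "use the v2 entry point", "warning")));
/// \endcode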
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. 
Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. 
ForExternalRedeclaration
};

RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplatePack,
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC, SourceLocation TypoLoc);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(
    const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind,
    Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC,
    DeclContext *MemberContext, bool EnteringContext,
    const ObjCObjectPointerType *OPT, bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
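// A minimal usage sketch (illustrative, not a definitive recipe) of the
// lookup entry points declared below:
// \code
//   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult()) {
//     NamedDecl *Found = R.getFoundDecl();
//     // ... use Found ...
//   }
// \endcode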
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc, LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing,
                      StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);

/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
  Emitted,
  CUDADiscarded,     // Discarded due to CUDA/HIP hostness
  OMPDiscarded,      // Discarded due to OpenMP hostness
  TemplateDiscarded, // Discarded due to uninstantiated templates
  Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
                                         bool Final = false);

// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non-error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
                              ArrayRef<Expr *> SubExprs,
                              QualType T = QualType());

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
                            SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                               bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
    FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D,
                              const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
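// (Illustrative:) for a type such as
//   typedef void (__attribute__((stdcall)) *Fn)(void);
// the pointee function type is sugared with an AttributedType carrying the
// calling convention, which getCallingConvAttributedType returns;
// hasExplicitCallingConv is true only when such an attribute is written on
// the current declarator rather than inherited through a typedef.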
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                 const ParsedAttributesView &Attrs,
                                 SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
                           ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
                               ObjCContainerDecl *IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar that backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(
    const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(
    Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
    FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc,
    Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite,
    unsigned &Attributes, const unsigned AttributesAsWritten, QualType T,
    TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);
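// (Illustrative:) a class extension commonly widens a primary-class
// property, which is the kind of redeclaration HandlePropertyInClassExtension
// checks:
// \code
//   @interface Foo : NSObject
//   @property (readonly) NSString *name;
//   @end
//   @interface Foo ()
//   @property (readwrite, copy) NSString *name;
//   @end
// \endcode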
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(
    Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc,
    SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel,
    SourceLocation GetterNameLoc, Selector SetterSel,
    SourceLocation SetterNameLoc, const bool isReadWrite,
    const unsigned Attributes, const unsigned AttributesAsWritten, QualType T,
    TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind,
    DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
                                     ObjCInterfaceDecl *IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl *IMPDecl,
                                ObjCContainerDecl *IDecl,
                                bool &IncompleteImpl, bool ImmediateClass,
                                bool WarnCategoryMethodImpl = false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class, and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
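// (Illustrative:) the global method pool matters when a message is sent to a
// receiver of static type 'id':
//   id obj; [obj count];
// resolves the selector 'count' against every known '-count' declaration in
// the pool, and mismatched candidate signatures may be diagnosed.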
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool CollectMultipleMethodsInGlobalPool(
    Selector Sel, SmallVectorImpl<ObjCMethodDecl *> &Methods,
    bool InstanceFirst, bool CheckTheOther,
    const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl *> &Methods);

void DiagnoseMultipleMethodInGlobalPool(
    SmallVectorImpl<ObjCMethodDecl *> &Methods, Selector Sel, SourceRange R,
    bool receiverIdOrClass);

private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl *> &Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/ true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/ false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/ true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/ false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(
    Selector Sel, QualType ObjectType = QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl *> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) {}
  FullExprArg(Sema &actions) : E(nullptr) {}

  ExprResult release() { return E; }

  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
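// A usage sketch (illustrative): parser callbacks wrap sub-expressions that
// must form complete full-expressions, e.g. the increment part of a for
// statement:
// \code
//   StmtResult Res = Actions.ActOnForStmt(ForLoc, LParenLoc, First, Second,
//                                         Actions.MakeFullExpr(Inc),
//                                         RParenLoc, Body);
// \endcode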
FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);

ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc, Stmt *SubStmt,
                            Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  SourceLocation LParenLoc, Stmt *InitStmt,
                                  ConditionResult Cond,
                                  SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                                 Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
                          ConditionResult Cond, SourceLocation RParenLoc,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                        Stmt *First, ConditionResult Second,
                        FullExprArg Third, SourceLocation RParenLoc,
                        Stmt *Body);

ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                         Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First,
                                      Expr *collection,
                                      SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

enum BuildForRangeKind {
  ///
Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; StmtResult ActOnSyncStmt(SourceLocation SyncLoc, StringRef sv); StmtResult ActOnSpawnStmt(SourceLocation SpawnLoc, StringRef sv, Stmt *S); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); void DiagnoseCilkSpawn(Stmt *S); StmtResult ActOnCilkScopeStmt(SourceLocation ScopeLoc, Stmt *S); StmtResult ActOnCilkSyncStmt(SourceLocation SyncLoc); StmtResult ActOnCilkSpawnStmt(SourceLocation SpawnLoc, Stmt *S); ExprResult ActOnCilkSpawnExpr(SourceLocation SpawnLoc, Expr *E); StmtResult HandleSimpleCilkForStmt(SourceLocation CilkForLoc, SourceLocation LParenLoc, Stmt *First, Expr *Condition, Expr *Increment, SourceLocation RParenLoc, Stmt *Body); StmtResult LiftCilkForLoopLimit(SourceLocation CilkForLoc, Stmt *First, Expr **Second); StmtResult ActOnCilkForStmt(SourceLocation CilkForLoc, SourceLocation LParenLoc, Stmt *Init, DeclStmt *Limit, ConditionResult InitCond, DeclStmt *Begin, DeclStmt *End, ConditionResult second, FullExprArg third, SourceLocation RParenLoc, Stmt *Body, DeclStmt *LoopVar = nullptr); StmtResult BuildCilkForStmt(SourceLocation CilkForLoc, SourceLocation LParenLoc, Stmt *Init, Expr *Cond, Expr *Inc, SourceLocation RParenLoc, Stmt *Body, Expr *LoopCount, Expr *Stride, QualType SpanType); StmtResult ActOnForallStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, 
Stmt *Body); StmtResult ActOnCXXForallRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForallRangeStmt( SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Stmt *Index, Stmt *IndexEnd, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForallRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? 
SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
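//
// A hedged illustration (hypothetical caller, not part of this header) of
// how the two entry points below are typically combined: DiagnoseUseOfDecl
// both checks usability and emits diagnostics, so callers usually bail out
// when it returns true.
//
// \code
//   // Hypothetical parser-side snippet, for illustration only:
//   if (Actions.DiagnoseUseOfDecl(D, NameLoc))
//     return ExprError();  // declaration is deleted, unavailable, etc.
// \endcode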
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                       const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                       bool ObjCPropertyAccess = false,
                       bool AvoidPartialAvailabilityChecks = false,
                       ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                       unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
/// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. 
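//
// A minimal sketch (hypothetical parser-side caller, illustration only) of
// the identifier entry point declared below: the parser forwards a parsed
// id-expression and lets Sema perform lookup and build the reference.
//
// \code
//   ExprResult Res = Actions.ActOnIdExpression(
//       getCurScope(), ScopeSpec, TemplateKWLoc, Id,
//       /*HasTrailingLParen=*/false, /*IsAddressOfOperand=*/false);
//   if (Res.isInvalid())
//     return ExprError();
// \endcode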
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const 
TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator 
expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
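///
/// A hedged usage sketch (hypothetical parser-side caller, illustration
/// only):
/// \code
///   ExprResult Call = Actions.ActOnCallExpr(getCurScope(), Fn.get(),
///                                           LParenLoc, ArgExprs, RParenLoc);
/// \endcode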
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr, bool IsExecConfig = false,
                         bool AllowRecovery = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                SourceLocation RParenLoc, MultiExprArg Args,
                AtomicExpr::AtomicOp Op,
                AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                      ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                      Expr *Config = nullptr, bool IsExecConfig = false,
                      ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                         ParsedType &Ty, SourceLocation RParenLoc,
                         Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation EqualOrColonLoc,
                                      bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                 UnresolvedSetImpl &Functions);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc, Expr *CondExpr,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
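// (ActOnStmtExprResult, declared below, performs that final-expression
// handling.) A hedged sketch of the full statement-expression protocol as a
// hypothetical caller might drive it, illustration only:
//
// \code
//   Actions.ActOnStartStmtExpr();
//   StmtResult SubStmt = ParseCompoundStatement();  // hypothetical helper
//   if (SubStmt.isInvalid()) {
//     Actions.ActOnStmtExprError();
//     return ExprError();
//   }
//   ExprResult E =
//       Actions.ActOnStmtExpr(getCurScope(), LPLoc, SubStmt.get(), RPLoc);
// \endcode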
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
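///
/// For reference, a source-level use of the builtin handled here
/// (illustration only):
/// \code
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   int4   iv = {1, 2, 3, 4};
///   float4 fv = __builtin_convertvector(iv, float4);  // element-wise cast
/// \endcode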
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();

NamespaceDecl *lookupStdExperimentalNamespace();

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                       CXXScopeSpec &SS,
                                       ParsedType TemplateTypeTy,
                                       IdentifierInfo *MemberOrBase);

public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};

/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc,
                                     ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
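///
/// For example (illustration only):
/// \code
///   struct S {
///     S(std::initializer_list<int>);  // an initializer-list constructor
///     S(int, int);                    // not one
///   };
/// \endcode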
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType, NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor,
                                 MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType,
                                 CXXConstructorDecl *Constructor,
                                 bool Elidable, MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
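//
// A hedged sketch (illustration only; argument values hypothetical) of a
// call to the fully-explicit overload declared below:
//
// \code
//   ExprResult Init = BuildCXXConstructExpr(
//       Loc, Ty, FoundDecl, Ctor, /*Elidable=*/true, Args,
//       /*HadMultipleCandidates=*/false, /*IsListInitialization=*/false,
//       /*IsStdInitListInitialization=*/false, /*RequiresZeroInit=*/false,
//       CXXConstructExpr::CK_Complete, ParenRange);
// \endcode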
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. 
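///
/// A minimal usage sketch (illustration only; locations hypothetical):
/// \code
///   ExceptionSpecificationType EST = EST_None;
///   ExprResult Converted = ActOnNoexceptSpec(NoexceptLoc, NoexceptExpr, EST);
/// \endcode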
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. 
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);

/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
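///
/// Illustrative (hedged) use when finishing a resolved call:
/// \code
///   Result = CheckForImmediateInvocation(Result, FDecl);
/// \endcode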
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. 
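///
/// A hedged usage sketch (illustration only):
/// \code
///   QualType ThisTy = getCurrentThisType();
///   if (ThisTy.isNull())
///     return ExprError();  // 'this' is not available in this context
/// \endcode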
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \return true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                         bool BuildAndDiagnose = true,
                         const unsigned *const FunctionScopeIndexToStopAt =
                             nullptr,
                         bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

ExprResult ActOnObjCAvailabilityCheckExpr(
    llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
    SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenOrBraceLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenOrBraceLoc,
                                     bool ListInitialization);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc,
                                     bool ListInitialization);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
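///
/// A hedged parser-side sketch (argument values hypothetical, illustration
/// only):
/// \code
///   ExprResult New = Actions.ActOnCXXNew(StartLoc, /*UseGlobal=*/false,
///                                        PlacementLParen, PlacementArgs,
///                                        PlacementRParen, TypeIdParens,
///                                        DeclaratorInfo, Initializer);
/// \endcode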
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. 
  /// Keeps information about an identifier in a nested-name-spec.
  ///
  struct NestedNameSpecInfo {
    /// The type of the object, if we're parsing nested-name-specifier in
    /// a member access expression.
    ParsedType ObjectType;

    /// The identifier preceding the '::'.
    IdentifierInfo *Identifier;

    /// The location of the identifier.
    SourceLocation IdentifierLoc;

    /// The location of the '::'.
    SourceLocation CCLoc;

    /// Creates info object for the most typical case.
    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc,
                       ParsedType ObjectType = ParsedType())
        : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
          CCLoc(ColonColonLoc) {}

    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc, QualType ObjectType)
        : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
          IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {}
  };

  bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                    NestedNameSpecInfo &IdInfo);

  bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                   bool EnteringContext, CXXScopeSpec &SS,
                                   NamedDecl *ScopeLookupResult,
                                   bool ErrorRecoveryLookup,
                                   bool *IsCorrectedToColon = nullptr,
                                   bool OnlyNamespace = false);

  /// The parser has parsed a nested-name-specifier 'identifier::'.
  ///
  /// \param S The scope in which this nested-name-specifier occurs.
  ///
  /// \param IdInfo Parser information about an identifier in the
  /// nested-name-spec.
  ///
  /// \param EnteringContext Whether we're entering the context nominated by
  /// this nested-name-specifier.
  ///
  /// \param SS The nested-name-specifier, which is both an input
  /// parameter (the nested-name-specifier before this type) and an
  /// output parameter (containing the full nested-name-specifier,
  /// including this new type).
  ///
  /// \param ErrorRecoveryLookup If true, then this method is called to improve
  /// error recovery. In this case do not emit error message.
  ///
  /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
  /// are allowed. The bool value pointed by this parameter is set to 'true'
  /// if the identifier is treated as if it was followed by ':', not '::'.
  ///
  /// \param OnlyNamespace If true, only considers namespaces in lookup.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                   bool EnteringContext, CXXScopeSpec &SS,
                                   bool ErrorRecoveryLookup = false,
                                   bool *IsCorrectedToColon = nullptr,
                                   bool OnlyNamespace = false);

  ExprResult ActOnDecltypeExpression(Expr *E);

  bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
                                           const DeclSpec &DS,
                                           SourceLocation ColonColonLoc);

  bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
                                 NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext);
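  // Illustrative sketch only (hypothetical names): each 'identifier::' step
  // below is one BuildCXXNestedNameSpecifier call extending the CXXScopeSpec;
  // the last line uses the decltype nested-name-specifier form.
  //
  //   namespace A { struct B { static constexpr int x = 1; using type = int; }; }
  //   int y = A::B::x;         // 'A::' then 'B::'
  //   A::B b;
  //   decltype(b)::type z = 0; // ActOnCXXNestedNameSpecifierDecltype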
  /// The parser has parsed a nested-name-specifier
  /// 'template[opt] template-name < template-args >::'.
  ///
  /// \param S The scope in which this nested-name-specifier occurs.
  ///
  /// \param SS The nested-name-specifier, which is both an input
  /// parameter (the nested-name-specifier before this type) and an
  /// output parameter (containing the full nested-name-specifier,
  /// including this new type).
  ///
  /// \param TemplateKWLoc the location of the 'template' keyword, if any.
  /// \param TemplateName the template name.
  /// \param TemplateNameLoc The location of the template name.
  /// \param LAngleLoc The location of the opening angle bracket ('<').
  /// \param TemplateArgs The template arguments.
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  /// \param CCLoc The location of the '::'.
  ///
  /// \param EnteringContext Whether we're entering the context of the
  /// nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   TemplateTy TemplateName,
                                   SourceLocation TemplateNameLoc,
                                   SourceLocation LAngleLoc,
                                   ASTTemplateArgsPtr TemplateArgs,
                                   SourceLocation RAngleLoc,
                                   SourceLocation CCLoc, bool EnteringContext);

  /// Given a C++ nested-name-specifier, produce an annotation value
  /// that the parser can use later to reconstruct the given
  /// nested-name-specifier.
  ///
  /// \param SS A nested-name-specifier.
  ///
  /// \returns A pointer containing all of the information in the
  /// nested-name-specifier \p SS.
  void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);

  /// Given an annotation pointer for a nested-name-specifier, restore
  /// the nested-name-specifier structure.
  ///
  /// \param Annotation The annotation pointer, produced by
  /// \c SaveNestedNameSpecifierAnnotation().
  ///
  /// \param AnnotationRange The source range corresponding to the annotation.
  ///
  /// \param SS The nested-name-specifier that will be updated with the
  /// contents of the annotation pointer.
  void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                            SourceRange AnnotationRange,
                                            CXXScopeSpec &SS);

  bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
  /// scope or nested-name-specifier) is parsed, part of a declarator-id.
  /// After this method is called, according to [C++ 3.4.3p3], names should be
  /// looked up in the declarator-id's scope, until the declarator is parsed
  /// and ActOnCXXExitDeclaratorScope is called.
  /// The 'SS' should be a non-empty valid CXXScopeSpec.
  bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

  /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
  /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
  /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
  /// Used to indicate that names should revert to being looked up in the
  /// defining scope.
  void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
  /// initializer for the declaration 'Dcl'.
  /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
  /// static data member of class X, names should be looked up in the scope of
  /// class X.
  void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

  /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
  /// initializer for the declaration 'Dcl'.
  void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
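  // Illustrative sketch only (hypothetical names): while the out-of-line
  // definition below is parsed, ActOnCXXEnterDeclaratorScope makes S's scope
  // active, so 'limit' resolves to N::S::limit rather than a global name.
  //
  //   namespace N {
  //     struct S { static const int limit = 8; static int arr[limit]; };
  //   }
  //   int N::S::arr[limit]; // 'limit' is looked up in S's scope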
  /// Create a new lambda closure type.
  CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                         TypeSourceInfo *Info,
                                         bool KnownDependent,
                                         LambdaCaptureDefault CaptureDefault);

  /// Start the definition of a lambda expression.
  CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
                                       SourceRange IntroducerRange,
                                       TypeSourceInfo *MethodType,
                                       SourceLocation EndLoc,
                                       ArrayRef<ParmVarDecl *> Params,
                                       ConstexprSpecKind ConstexprKind,
                                       Expr *TrailingRequiresClause);

  /// Number lambda for linkage purposes if necessary.
  void handleLambdaNumbering(
      CXXRecordDecl *Class, CXXMethodDecl *Method,
      Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);

  /// Endow the lambda scope info with the relevant properties.
  void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                        CXXMethodDecl *CallOperator,
                        SourceRange IntroducerRange,
                        LambdaCaptureDefault CaptureDefault,
                        SourceLocation CaptureDefaultLoc, bool ExplicitParams,
                        bool ExplicitResultType, bool Mutable);

  /// Perform initialization analysis of the init-capture and perform
  /// any implicit conversions such as an lvalue-to-rvalue conversion if
  /// not being used to initialize a reference.
  ParsedType actOnLambdaInitCaptureInitialization(
      SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
      IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
    return ParsedType::make(buildLambdaInitCaptureInitialization(
        Loc, ByRef, EllipsisLoc, None, Id,
        InitKind != LambdaCaptureInitKind::CopyInit, Init));
  }
  QualType buildLambdaInitCaptureInitialization(
      SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
      Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
      Expr *&Init);

  /// Create a dummy variable within the declcontext of the lambda's
  /// call operator, for name lookup purposes for a lambda init capture.
  ///
  /// CodeGen handles emission of lambda captures, ignoring these dummy
  /// variables appropriately.
  VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                          QualType InitCaptureType,
                                          SourceLocation EllipsisLoc,
                                          IdentifierInfo *Id,
                                          unsigned InitStyle, Expr *Init);

  /// Add an init-capture to a lambda scope.
  void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

  /// Note that we have finished the explicit captures for the
  /// given lambda.
  void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

  /// \brief This is called after parsing the explicit template parameter list
  /// on a lambda (if it exists) in C++2a.
  void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                                ArrayRef<NamedDecl *> TParams,
                                                SourceLocation RAngleLoc,
                                                ExprResult RequiresClause);

  /// Introduce the lambda parameters into scope.
  void addLambdaParameters(ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
                           CXXMethodDecl *CallOperator, Scope *CurScope);

  /// Deduce a block or lambda's return type based on the return
  /// statements present in the body.
  void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

  /// ActOnStartOfLambdaDefinition - This is called just before we start
  /// parsing the body of a lambda; it analyzes the explicit captures and
  /// arguments, and sets up various data-structures for the body of the
  /// lambda.
  void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
                                    Declarator &ParamInfo, Scope *CurScope);

  /// ActOnLambdaError - If there is an error parsing a lambda, this callback
  /// is invoked to pop the information about the lambda.
  void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
                        bool IsInstantiation = false);

  /// ActOnLambdaExpr - This is called when the body of a lambda expression
  /// was successfully completed.
  ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
                             Scope *CurScope);

  /// Does copying/destroying the captured variable have side effects?
  bool CaptureHasSideEffects(const sema::Capture &From);

  /// Diagnose if an explicit lambda capture is unused. Returns true if a
  /// diagnostic is emitted.
  bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
                                   const sema::Capture &From);

  /// Build a FieldDecl suitable to hold the given capture.
  FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
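  // Illustrative sketch only (hypothetical names): an init-capture of the
  // kind analyzed by actOnLambdaInitCaptureInitialization and friends.
  //
  //   #include <memory>
  //   auto make_reader(std::unique_ptr<int> p) {
  //     return [q = std::move(p)] { return q ? *q : 0; }; // 'q' is an init-capture
  //   }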
  /// Initialize the given capture with a suitable expression.
  ExprResult BuildCaptureInit(const sema::Capture &Capture,
                              SourceLocation ImplicitCaptureLoc,
                              bool IsOpenMPMapping = false);

  /// Complete a lambda-expression having processed and attached the
  /// lambda body.
  ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
                             sema::LambdaScopeInfo *LSI);

  /// Get the return type to use for a lambda's conversion function(s) to
  /// function pointer type, given the type of the call operator.
  QualType
  getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
                                        CallingConv CC);

  /// Define the "body" of the conversion from a lambda object to a
  /// function pointer.
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToFunctionPointerConversion(
      SourceLocation CurrentLoc, CXXConversionDecl *Conv);

  /// Define the "body" of the conversion from a lambda object to a
  /// block pointer.
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                    CXXConversionDecl *Conv);

  ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                           SourceLocation ConvLocation,
                                           CXXConversionDecl *Conv,
                                           Expr *Src);

  /// Check whether the given expression is a valid constraint expression.
  /// A diagnostic is emitted if it is not, false is returned, and
  /// PossibleNonPrimary will be set to true if the failure might be due to a
  /// non-primary expression being used as an atomic constraint.
  bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
                                 bool *PossibleNonPrimary = nullptr,
                                 bool IsTrailingRequiresClause = false);

private:
  /// Caches pairs of template-like decls whose associated constraints were
  /// checked for subsumption and whether or not the first's constraints did
  /// in fact subsume the second's.
  llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;

  /// Caches the normalized associated constraints of declarations (concepts
  /// or constrained declarations). If an error occurred while normalizing the
  /// associated constraints of the template or concept, nullptr will be
  /// cached here.
  llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

  llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
      SatisfactionCache;

public:
  const NormalizedConstraint *getNormalizedAssociatedConstraints(
      NamedDecl *ConstrainedDecl,
      ArrayRef<const Expr *> AssociatedConstraints);

  /// \brief Check whether the given declaration's associated constraints are
  /// at least as constrained as another declaration's according to the
  /// partial ordering of constraints.
  ///
  /// \param Result If no error occurred, receives the result: true if D1 is
  /// at least as constrained as D2, and false otherwise.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                              NamedDecl *D2, ArrayRef<const Expr *> AC2,
                              bool &Result);
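  // Illustrative sketch only (hypothetical names, C++20): the overload
  // ranking that IsAtLeastAsConstrained supports.
  //
  //   template <typename T> concept Small = sizeof(T) <= sizeof(int);
  //   template <typename T> int f(T) { return 1; }                   // #1
  //   template <typename T> requires Small<T> int f(T) { return 2; } // #2
  //   int r = f('a'); // picks #2: its constraints subsume #1's empty set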
  /// If D1 was not at least as constrained as D2, but would've been if a pair
  /// of atomic constraints involved had been declared in a concept and not
  /// repeated in two separate places in code.
  ///
  /// \returns true if such a diagnostic was emitted, false otherwise.
  bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(
      NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2,
      ArrayRef<const Expr *> AC2);

  /// \brief Check whether the given list of constraint expressions are
  /// satisfied (as if in a 'conjunction') given template arguments.
  /// \param Template the template-like entity that triggered the constraints
  /// check (either a concept or a constrained entity).
  /// \param ConstraintExprs a list of constraint expressions, treated as if
  /// they were 'AND'ed together.
  /// \param TemplateArgs the list of template arguments to substitute into
  /// the constraint expression.
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  /// \param Satisfaction if true is returned, will contain details of the
  /// satisfaction, with enough information to diagnose an unsatisfied
  /// expression.
  /// \returns true if an error occurred and satisfaction could not be
  /// checked, false otherwise.
  bool CheckConstraintSatisfaction(const NamedDecl *Template,
                                   ArrayRef<const Expr *> ConstraintExprs,
                                   ArrayRef<TemplateArgument> TemplateArgs,
                                   SourceRange TemplateIDRange,
                                   ConstraintSatisfaction &Satisfaction);

  /// \brief Check whether the given non-dependent constraint expression is
  /// satisfied. Returns false and updates Satisfaction with the satisfaction
  /// verdict if successful, emits a diagnostic and returns true if an error
  /// occurred and satisfaction could not be determined.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
                                   ConstraintSatisfaction &Satisfaction);

  /// Check whether the given function decl's trailing requires clause is
  /// satisfied, if any. Returns false and updates Satisfaction with the
  /// satisfaction verdict if successful, emits a diagnostic and returns true
  /// if an error occurred and satisfaction could not be determined.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckFunctionConstraints(const FunctionDecl *FD,
                                ConstraintSatisfaction &Satisfaction,
                                SourceLocation UsageLoc = SourceLocation());

  /// \brief Ensure that the given template arguments satisfy the constraints
  /// associated with the given template, emitting a diagnostic if they do
  /// not.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateArgs The converted, canonicalized template arguments.
  ///
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  ///
  /// \returns true if the constraints are not satisfied or could not be
  /// checked for satisfaction, false if the constraints are satisfied.
  bool EnsureTemplateArgumentListConstraints(
      TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
      SourceRange TemplateIDRange);

  /// \brief Emit diagnostics explaining why a constraint expression was
  /// deemed unsatisfied.
  /// \param First whether this is the first time an unsatisfied constraint
  /// is diagnosed for this error.
  void
  DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
                                bool First = true);

  /// \brief Emit diagnostics explaining why a constraint expression was
  /// deemed unsatisfied.
  void
  DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
                                bool First = true);
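  // Illustrative sketch only (hypothetical names, C++20): satisfaction checks
  // of the kind CheckConstraintSatisfaction performs.
  //
  //   template <typename T> concept Addable = requires(T a) { a + a; };
  //   static_assert(Addable<int>);   // satisfied
  //   static_assert(!Addable<void>); // unsatisfied; details are diagnosable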
  // ParseObjCStringLiteral - Parse Objective-C string literals.
  ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                    ArrayRef<Expr *> Strings);
  ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

  /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
  /// numeric literal expression. Type of the expression will be "NSNumber *"
  /// or "id" if NSNumber is unavailable.
  ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
  ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc,
                                  SourceLocation ValueLoc, bool Value);
  ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

  /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
  /// '@' prefixed parenthesized expression. The type of the expression will
  /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the
  /// type of ValueType, which is allowed to be a built-in numeric type,
  /// "char *", "const char *" or C structure with attribute 'objc_boxable'.
  ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);

  ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
                                          Expr *IndexExpr,
                                          ObjCMethodDecl *getterMethod,
                                          ObjCMethodDecl *setterMethod);

  ExprResult BuildObjCDictionaryLiteral(
      SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements);

  ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
                                       TypeSourceInfo *EncodedTypeInfo,
                                       SourceLocation RParenLoc);
  ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
                                    CXXConversionDecl *Method,
                                    bool HadMultipleCandidates);

  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
                                       SourceLocation EncodeLoc,
                                       SourceLocation LParenLoc,
                                       ParsedType Ty,
                                       SourceLocation RParenLoc);

  /// ParseObjCSelectorExpression - Build selector expression for \@selector
  ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc,
                                         SourceLocation SelLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation RParenLoc,
                                         bool WarnMultipleSelectors);

  /// ParseObjCProtocolExpression - Build protocol expression for \@protocol
  ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
                                         SourceLocation AtLoc,
                                         SourceLocation ProtoLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation ProtoIdLoc,
                                         SourceLocation RParenLoc);

  //===--------------------------------------------------------------------===//
  // C++ Declarations
  //
  Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
                                       Expr *LangStr,
                                       SourceLocation LBraceLoc);
  Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec,
                                        SourceLocation RBraceLoc);

  //===--------------------------------------------------------------------===//
  // C++ Classes
  //
  CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
  bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
                          const CXXScopeSpec *SS = nullptr);
  bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);

  bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
                            SourceLocation ColonLoc,
                            const ParsedAttributesView &Attrs);

  NamedDecl *
  ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
                           MultiTemplateParamsArg TemplateParameterLists,
                           Expr *BitfieldWidth, const VirtSpecifiers &VS,
                           InClassInitStyle InitStyle);

  void ActOnStartCXXInClassMemberInitializer();
  void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
                                              SourceLocation EqualLoc,
                                              Expr *Init);
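  // Illustrative sketch only (hypothetical names): 'a' carries an in-class
  // member initializer; the constructor's ':' list is handled by the
  // ActOnMemInitializer overloads that follow.
  //
  //   struct S {
  //     int a = 1;    // in-class initializer
  //     int b;
  //     S() : b(2) {} // mem-initializer list
  //   };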
  MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    SourceLocation LParenLoc,
                                    ArrayRef<Expr *> Args,
                                    SourceLocation RParenLoc,
                                    SourceLocation EllipsisLoc);

  MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    Expr *InitList,
                                    SourceLocation EllipsisLoc);

  MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    Expr *Init, SourceLocation EllipsisLoc);

  MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init,
                                       SourceLocation IdLoc);

  MemInitResult BuildBaseInitializer(QualType BaseType,
                                     TypeSourceInfo *BaseTInfo, Expr *Init,
                                     CXXRecordDecl *ClassDecl,
                                     SourceLocation EllipsisLoc);

  MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
                                           CXXRecordDecl *ClassDecl);

  bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
                                CXXCtorInitializer *Initializer);

  bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
                           ArrayRef<CXXCtorInitializer *> Initializers = None);

  void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

  /// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
  /// mark all the non-trivial destructors of its members and bases as
  /// referenced.
  void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                              CXXRecordDecl *Record);

  /// Mark destructors of virtual bases of this class referenced. In the
  /// Itanium C++ ABI, this is done when emitting a destructor for any
  /// non-abstract class. In the Microsoft C++ ABI, this is done any time a
  /// class's destructor is referenced.
  void MarkVirtualBaseDestructorsReferenced(
      SourceLocation Location, CXXRecordDecl *ClassDecl,
      llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);

  /// Do semantic checks to allow the complete destructor variant to be
  /// emitted when the destructor is defined in another translation unit. In
  /// the Itanium C++ ABI, destructor variants are emitted together. In the
  /// MS C++ ABI, they can be emitted in separate TUs. To emit the complete
  /// variant, run a subset of the checks performed when emitting a regular
  /// destructor.
  void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
                                      CXXDestructorDecl *Dtor);

  /// The list of classes whose vtables have been used within
  /// this translation unit, and the source locations at which the
  /// first use occurred.
  typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

  /// The list of vtables that are required but have not yet been
  /// materialized.
  SmallVector<VTableUse, 16> VTableUses;

  /// The set of classes whose vtables have been used within
  /// this translation unit, and a bit that will be true if the vtable is
  /// required to be emitted (otherwise, it should be emitted only if needed
  /// by code generation).
  llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

  /// Load any externally-stored vtable uses.
  void LoadExternalVTableUses();

  /// Note that the vtable for the given class was used at the
  /// given location.
  void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                      bool DefinitionRequired = false);

  /// Mark the exception specifications of all virtual member functions
  /// in the given class as needed.
  void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                             const CXXRecordDecl *RD);
  /// MarkVirtualMembersReferenced - Will mark all members of the given
  /// CXXRecordDecl referenced.
  void MarkVirtualMembersReferenced(SourceLocation Loc,
                                    const CXXRecordDecl *RD,
                                    bool ConstexprOnly = false);

  /// Define all of the vtables that have been used in this
  /// translation unit and reference any virtual members used by those
  /// vtables.
  ///
  /// \returns true if any work was done, false otherwise.
  bool DefineUsedVTables();

  void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

  void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                            ArrayRef<CXXCtorInitializer*> MemInits,
                            bool AnyErrors);

  /// Check class-level dllimport/dllexport attribute. The caller must
  /// ensure that referenceDLLExportedClassMethods is called at some point
  /// later when all outer classes of Class are complete.
  void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
  void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

  void referenceDLLExportedClassMethods();

  void propagateDLLAttrToBaseClassTemplate(
      CXXRecordDecl *Class, Attr *ClassAttr,
      ClassTemplateSpecializationDecl *BaseTemplateSpec,
      SourceLocation BaseLoc);

  /// Add gsl::Pointer attribute to std::container::iterator
  /// \param ND The declaration that introduces the name
  /// std::container::iterator.
  /// \param UnderlyingRecord The record named by ND.
  void inferGslPointerAttribute(NamedDecl *ND,
                                CXXRecordDecl *UnderlyingRecord);

  /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
  void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

  /// Add [[gsl::Pointer]] attributes for std:: types.
  void inferGslPointerAttribute(TypedefNameDecl *TD);

  void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

  /// Check that the C++ class annotated with "trivial_abi" satisfies all the
  /// conditions that are needed for the attribute to have an effect.
  void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

  void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                         Decl *TagDecl, SourceLocation LBrac,
                                         SourceLocation RBrac,
                                         const ParsedAttributesView &AttrList);
  void ActOnFinishCXXMemberDecls();
  void ActOnFinishCXXNonNestedClass();

  void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
  unsigned ActOnReenterTemplateScope(Decl *Template,
                                     llvm::function_ref<Scope *()> EnterScope);
  void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
  void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
  void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
  void ActOnFinishDelayedMemberInitializers(Decl *Record);
  void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                                CachedTokens &Toks);
  void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
  bool IsInsideALocalClassWithinATemplateFunction();

  Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr,
                                     Expr *AssertMessageExpr,
                                     SourceLocation RParenLoc);
  Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                     Expr *AssertExpr,
                                     StringLiteral *AssertMessageExpr,
                                     SourceLocation RParenLoc, bool Failed);

  FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                  SourceLocation FriendLoc,
                                  TypeSourceInfo *TSInfo);
  Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                            MultiTemplateParamsArg TemplateParams);
  NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                     MultiTemplateParamsArg TemplateParams);

  QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                      StorageClass &SC);
  void CheckConstructor(CXXConstructorDecl *Constructor);
  QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                     StorageClass &SC);
  bool CheckDestructor(CXXDestructorDecl *Destructor);
  void CheckConversionDeclarator(Declarator &D, QualType &R,
                                 StorageClass &SC);
  Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
  void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                     StorageClass &SC);
  void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

  void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);

  bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
                                             CXXSpecialMember CSM);
  void CheckDelayedMemberExceptionSpecs();

  bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
                                          DefaultedComparisonKind DCK);
  void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
                                         FunctionDecl *Spaceship);
  void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
                                 DefaultedComparisonKind DCK);

  //===--------------------------------------------------------------------===//
  // C++ Derived Classes
  //

  /// ActOnBaseSpecifier - Parsed a base specifier
  CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
                                       SourceRange SpecifierRange,
                                       bool Virtual, AccessSpecifier Access,
                                       TypeSourceInfo *TInfo,
                                       SourceLocation EllipsisLoc);

  BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
                                ParsedAttributes &Attrs, bool Virtual,
                                AccessSpecifier Access, ParsedType basetype,
                                SourceLocation BaseLoc,
                                SourceLocation EllipsisLoc);

  bool AttachBaseSpecifiers(CXXRecordDecl *Class,
                            MutableArrayRef<CXXBaseSpecifier *> Bases);
  void ActOnBaseSpecifiers(Decl *ClassDecl,
                           MutableArrayRef<CXXBaseSpecifier *> Bases);

  bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
  bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
                     CXXBasePaths &Paths);

  // FIXME: I don't like this name.
  void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);

  bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                    SourceLocation Loc, SourceRange Range,
                                    CXXCastPath *BasePath = nullptr,
                                    bool IgnoreAccess = false);
  bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                    unsigned InaccessibleBaseID,
                                    unsigned AmbiguousBaseConvID,
                                    SourceLocation Loc, SourceRange Range,
                                    DeclarationName Name,
                                    CXXCastPath *BasePath,
                                    bool IgnoreAccess = false);

  std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);

  bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
                                         const CXXMethodDecl *Old);

  /// CheckOverridingFunctionReturnType - Checks whether the return types are
  /// covariant, according to C++ [class.virtual]p5.
  bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                         const CXXMethodDecl *Old);

  /// CheckOverridingFunctionExceptionSpec - Checks whether the exception
  /// spec is a subset of base spec.
  bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

  bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);

  /// CheckOverrideControl - Check C++11 override control semantics.
  void CheckOverrideControl(NamedDecl *D);

  /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
  /// not used in the declaration of an overriding method.
  void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
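  // Illustrative sketch only (hypothetical names): the override-control and
  // covariant-return checks declared above.
  //
  //   struct B { virtual ~B(); virtual B *clone(); virtual void f() final; };
  //   struct D : B {
  //     D *clone() override;  // OK: covariant return type
  //     // void f() override; // error: overrides 'f', which is 'final' in B
  //   };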
  /// CheckForFunctionMarkedFinal - Checks whether a virtual member function
  /// overrides a virtual member function marked 'final', according to
  /// C++11 [class.virtual]p4.
  bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                              const CXXMethodDecl *Old);

  //===--------------------------------------------------------------------===//
  // C++ Access Control
  //

  enum AccessResult {
    AR_accessible,
    AR_inaccessible,
    AR_dependent,
    AR_delayed
  };

  bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                                NamedDecl *PrevMemberDecl,
                                AccessSpecifier LexicalAS);

  AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                           DeclAccessPair FoundDecl);
  AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                           DeclAccessPair FoundDecl);
  AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                     SourceRange PlacementRange,
                                     CXXRecordDecl *NamingClass,
                                     DeclAccessPair FoundDecl,
                                     bool Diagnose = true);
  AccessResult CheckConstructorAccess(SourceLocation Loc,
                                      CXXConstructorDecl *D,
                                      DeclAccessPair FoundDecl,
                                      const InitializedEntity &Entity,
                                      bool IsCopyBindingRefToTemp = false);
  AccessResult CheckConstructorAccess(SourceLocation Loc,
                                      CXXConstructorDecl *D,
                                      DeclAccessPair FoundDecl,
                                      const InitializedEntity &Entity,
                                      const PartialDiagnostic &PDiag);
  AccessResult CheckDestructorAccess(SourceLocation Loc,
                                     CXXDestructorDecl *Dtor,
                                     const PartialDiagnostic &PDiag,
                                     QualType objectType = QualType());
  AccessResult CheckFriendAccess(NamedDecl *D);
  AccessResult CheckMemberAccess(SourceLocation UseLoc,
                                 CXXRecordDecl *NamingClass,
                                 DeclAccessPair Found);
  AccessResult
  CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
                                     CXXRecordDecl *DecomposedClass,
                                     DeclAccessPair Field);
  AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                         Expr *ObjectExpr, Expr *ArgExpr,
                                         DeclAccessPair FoundDecl);
  AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                          DeclAccessPair FoundDecl);
  AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base,
                                    QualType Derived, const CXXBasePath &Path,
                                    unsigned DiagID, bool ForceCheck = false,
                                    bool ForceUnprivileged = false);
  void CheckLookupAccess(const LookupResult &R);
  bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
                          QualType BaseType);
  bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                     DeclAccessPair Found,
                                     QualType ObjectType, SourceLocation Loc,
                                     const PartialDiagnostic &Diag);
  bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                     DeclAccessPair Found,
                                     QualType ObjectType) {
    return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                         SourceLocation(), PDiag());
  }

  void HandleDependentAccessCheck(
      const DependentDiagnostic &DD,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void PerformDependentDiagnostics(
      const DeclContext *Pattern,
      const MultiLevelTemplateArgumentList &TemplateArgs);

  void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

  /// When true, access checking violations are treated as SFINAE
  /// failures rather than hard errors.
  bool AccessCheckingSFINAE;

  enum AbstractDiagSelID {
    AbstractNone = -1,
    AbstractReturnType,
    AbstractParamType,
    AbstractVariableType,
    AbstractFieldType,
    AbstractIvarType,
    AbstractSynthesizedIvarType,
    AbstractArrayType
  };

  bool isAbstractType(SourceLocation Loc, QualType T);
  bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                              TypeDiagnoser &Diagnoser);
  template <typename... Ts>
  bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireNonAbstractType(Loc, T, Diagnoser);
  }
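  // Illustrative sketch only (hypothetical names): RequireNonAbstractType
  // rejects uses that would create an object of abstract class type.
  //
  //   struct Shape { virtual void draw() = 0; }; // abstract
  //   Shape *p = nullptr;  // OK: pointers to abstract types are fine
  //   // Shape s;          // error: AbstractVariableType
  //   // void g(Shape);    // error: AbstractParamType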
  void DiagnoseAbstractType(const CXXRecordDecl *RD);

  //===--------------------------------------------------------------------===//
  // C++ Overloaded Operators [C++ 13.5]
  //

  bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);

  bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

  //===--------------------------------------------------------------------===//
  // C++ Templates [C++ 14]
  //
  void FilterAcceptableTemplateNames(LookupResult &R,
                                     bool AllowFunctionTemplates = true,
                                     bool AllowDependent = true);
  bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                     bool AllowFunctionTemplates = true,
                                     bool AllowDependent = true,
                                     bool AllowNonTemplateFunctions = false);

  /// Try to interpret the lookup result D as a template-name.
  ///
  /// \param D A declaration found by name lookup.
  /// \param AllowFunctionTemplates Whether function templates should be
  /// considered valid results.
  /// \param AllowDependent Whether unresolved using declarations (that might
  /// name templates) should be considered valid results.
  static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
                                          bool AllowFunctionTemplates = true,
                                          bool AllowDependent = true);

  enum TemplateNameIsRequiredTag { TemplateNameIsRequired };

  /// Whether and why a template name is required in this lookup.
  class RequiredTemplateKind {
  public:
    /// Template name is required if TemplateKWLoc is valid.
    RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
        : TemplateKW(TemplateKWLoc) {}

    /// Template name is unconditionally required.
    RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}

    SourceLocation getTemplateKeywordLoc() const {
      return TemplateKW.getValueOr(SourceLocation());
    }
    bool hasTemplateKeyword() const {
      return getTemplateKeywordLoc().isValid();
    }
    bool isRequired() const { return TemplateKW != SourceLocation(); }
    explicit operator bool() const { return isRequired(); }

  private:
    llvm::Optional<SourceLocation> TemplateKW;
  };

  enum class AssumedTemplateKind {
    /// This is not assumed to be a template name.
    None,

    /// This is assumed to be a template name because lookup found nothing.
    FoundNothing,

    /// This is assumed to be a template name because lookup found one or
    /// more functions (but no function templates).
    FoundFunctions,
  };

  bool LookupTemplateName(
      LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
      bool EnteringContext, bool &MemberOfUnknownSpecialization,
      RequiredTemplateKind RequiredTemplate = SourceLocation(),
      AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);

  TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS,
                                  bool hasTemplateKeyword,
                                  const UnqualifiedId &Name,
                                  ParsedType ObjectType, bool EnteringContext,
                                  TemplateTy &Template,
                                  bool &MemberOfUnknownSpecialization,
                                  bool Disambiguation = false);
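  // Illustrative sketch only (hypothetical names): the 'template' keyword
  // recorded by RequiredTemplateKind disambiguates a dependent member
  // template name.
  //
  //   template <typename T> struct X { template <typename U> U get(); };
  //   template <typename T> int probe(X<T> x) {
  //     return x.template get<int>(); // 'template' marks 'get' as a template
  //   }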
  /// Try to resolve an undeclared template name as a type template.
  ///
  /// Sets II to the identifier corresponding to the template name, and
  /// updates Name to a corresponding (typo-corrected) type template name and
  /// TNK to the corresponding kind, if possible.
  void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
                                       TemplateNameKind &TNK,
                                       SourceLocation NameLoc,
                                       IdentifierInfo *&II);

  bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
                                        SourceLocation NameLoc,
                                        bool Diagnose = true);

  /// Determine whether a particular identifier might be the name in a C++1z
  /// deduction-guide declaration.
  bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
                            SourceLocation NameLoc,
                            ParsedTemplateTy *Template = nullptr);

  bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
                                   SourceLocation IILoc, Scope *S,
                                   const CXXScopeSpec *SS,
                                   TemplateTy &SuggestedTemplate,
                                   TemplateNameKind &SuggestedKind);

  bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
                                      NamedDecl *Instantiation,
                                      bool InstantiatedFromMember,
                                      const NamedDecl *Pattern,
                                      const NamedDecl *PatternDef,
                                      TemplateSpecializationKind TSK,
                                      bool Complain = true);

  void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
  TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);

  NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
                                SourceLocation EllipsisLoc,
                                SourceLocation KeyLoc,
                                IdentifierInfo *ParamName,
                                SourceLocation ParamNameLoc, unsigned Depth,
                                unsigned Position, SourceLocation EqualLoc,
                                ParsedType DefaultArg,
                                bool HasTypeConstraint);

  bool ActOnTypeConstraint(const CXXScopeSpec &SS,
                           TemplateIdAnnotation *TypeConstraint,
                           TemplateTypeParmDecl *ConstrainedParameter,
                           SourceLocation EllipsisLoc);

  bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
                            DeclarationNameInfo NameInfo,
                            ConceptDecl *NamedConcept,
                            const TemplateArgumentListInfo *TemplateArgs,
                            TemplateTypeParmDecl *ConstrainedParameter,
                            SourceLocation EllipsisLoc);

  bool AttachTypeConstraint(AutoTypeLoc TL,
                            NonTypeTemplateParmDecl *ConstrainedParameter,
                            SourceLocation EllipsisLoc);

  bool RequireStructuralType(QualType T, SourceLocation Loc);

  QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
                                             SourceLocation Loc);
  QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);

  NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
                                           unsigned Depth, unsigned Position,
                                           SourceLocation EqualLoc,
                                           Expr *DefaultArg);
  NamedDecl *ActOnTemplateTemplateParameter(
      Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params,
      SourceLocation EllipsisLoc, IdentifierInfo *ParamName,
      SourceLocation ParamNameLoc, unsigned Depth, unsigned Position,
      SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg);

  TemplateParameterList *
  ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc,
                             SourceLocation TemplateLoc,
                             SourceLocation LAngleLoc,
                             ArrayRef<NamedDecl *> Params,
                             SourceLocation RAngleLoc, Expr *RequiresClause);
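  // Illustrative sketch only (hypothetical names): one parameter list
  // exercising ActOnTypeParameter, ActOnNonTypeTemplateParameter and
  // ActOnTemplateTemplateParameter in turn.
  //
  //   template <typename T, int N, template <typename> class TT>
  //   struct Pack;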
  /// The context in which we are checking a template parameter list.
  enum TemplateParamListContext {
    TPC_ClassTemplate,
    TPC_VarTemplate,
    TPC_FunctionTemplate,
    TPC_ClassTemplateMember,
    TPC_FriendClassTemplate,
    TPC_FriendFunctionTemplate,
    TPC_FriendFunctionTemplateDefinition,
    TPC_TypeAliasTemplate
  };
  bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                  TemplateParameterList *OldParams,
                                  TemplateParamListContext TPC,
                                  SkipBodyInfo *SkipBody = nullptr);

  TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
      SourceLocation DeclStartLoc, SourceLocation DeclLoc,
      const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
      ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
      bool &IsMemberSpecialization, bool &Invalid,
      bool SuppressDiagnostic = false);

  DeclResult CheckClassTemplate(
      Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
      CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
      const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
      AccessSpecifier AS, SourceLocation ModulePrivateLoc,
      SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
      TemplateParameterList **OuterTemplateParamLists,
      SkipBodyInfo *SkipBody = nullptr);

  TemplateArgumentLoc
  getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
                                QualType NTTPType, SourceLocation Loc);

  /// Get a template argument mapping the given template parameter to itself,
  /// e.g. for X in \c template<int X>, this would return an expression
  /// template argument referencing X.
  TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
                                                     SourceLocation Location);

  void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                  TemplateArgumentListInfo &Out);

  ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);

  void NoteAllFoundTemplates(TemplateName Name);

  QualType CheckTemplateIdType(TemplateName Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs);

  TypeResult
  ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS,
                      SourceLocation TemplateKWLoc, TemplateTy Template,
                      IdentifierInfo *TemplateII,
                      SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                      ASTTemplateArgsPtr TemplateArgs,
                      SourceLocation RAngleLoc,
                      bool IsCtorOrDtorName = false,
                      bool IsClassName = false);

  /// Parsed an elaborated-type-specifier that refers to a template-id,
  /// such as \c class T::template apply<U>.
  TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
                                    TypeSpecifierType TagSpec,
                                    SourceLocation TagLoc, CXXScopeSpec &SS,
                                    SourceLocation TemplateKWLoc,
                                    TemplateTy TemplateD,
                                    SourceLocation TemplateLoc,
                                    SourceLocation LAngleLoc,
                                    ASTTemplateArgsPtr TemplateArgsIn,
                                    SourceLocation RAngleLoc);

  DeclResult ActOnVarTemplateSpecialization(
      Scope *S, Declarator &D, TypeSourceInfo *DI,
      SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
      StorageClass SC, bool IsPartialSpecialization);

  /// Get the specialization of the given variable template corresponding to
  /// the specified argument list, or a null-but-valid result if the
  /// arguments are dependent.
  DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
                                SourceLocation TemplateLoc,
                                SourceLocation TemplateNameLoc,
                                const TemplateArgumentListInfo &TemplateArgs);
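  // Illustrative sketch only (hypothetical names): a variable template
  // reference of the kind the CheckVarTemplateId overloads resolve.
  //
  //   template <typename T> constexpr T pi = T(3.141592653589793L);
  //   constexpr double d = pi<double>; // specialization picked from <double>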
  /// Form a reference to the specialization of the given variable template
  /// corresponding to the specified argument list, or a null-but-valid
  /// result if the arguments are dependent.
  ExprResult
  CheckVarTemplateId(const CXXScopeSpec &SS,
                     const DeclarationNameInfo &NameInfo,
                     VarTemplateDecl *Template, SourceLocation TemplateLoc,
                     const TemplateArgumentListInfo *TemplateArgs);

  ExprResult
  CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                         const DeclarationNameInfo &ConceptNameInfo,
                         NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
                         const TemplateArgumentListInfo *TemplateArgs);

  void diagnoseMissingTemplateArguments(TemplateName Name,
                                        SourceLocation Loc);

  ExprResult
  BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                      LookupResult &R, bool RequiresADL,
                      const TemplateArgumentListInfo *TemplateArgs);

  ExprResult
  BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                               const DeclarationNameInfo &NameInfo,
                               const TemplateArgumentListInfo *TemplateArgs);

  TemplateNameKind ActOnTemplateName(Scope *S, CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     const UnqualifiedId &Name,
                                     ParsedType ObjectType,
                                     bool EnteringContext,
                                     TemplateTy &Template,
                                     bool AllowInjectedClassName = false);

  DeclResult ActOnClassTemplateSpecialization(
      Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
      SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
      TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
      MultiTemplateParamsArg TemplateParameterLists,
      SkipBodyInfo *SkipBody = nullptr);

  bool CheckTemplatePartialSpecializationArgs(
      SourceLocation Loc, TemplateDecl *PrimaryTemplate,
      unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args);
  void CheckTemplatePartialSpecialization(
      ClassTemplatePartialSpecializationDecl *Partial);
  void CheckTemplatePartialSpecialization(
      VarTemplatePartialSpecializationDecl *Partial);

  Decl *ActOnTemplateDeclarator(Scope *S,
                                MultiTemplateParamsArg TemplateParameterLists,
                                Declarator &D);

  bool CheckSpecializationInstantiationRedecl(
      SourceLocation NewLoc, TemplateSpecializationKind NewTSK,
      NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK,
      SourceLocation PrevPtOfInstantiation, bool &SuppressNew);

  bool CheckDependentFunctionTemplateSpecialization(
      FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs,
      LookupResult &Previous);

  bool CheckFunctionTemplateSpecialization(
      FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
      LookupResult &Previous, bool QualifiedFriend = false);
  bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
  void CompleteMemberSpecialization(NamedDecl *Member,
                                    LookupResult &Previous);

  DeclResult ActOnExplicitInstantiation(
      Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
      unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
      TemplateTy Template, SourceLocation TemplateNameLoc,
      SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
      SourceLocation RAngleLoc, const ParsedAttributesView &Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        unsigned TagSpec,
                                        SourceLocation KWLoc,
                                        CXXScopeSpec &SS,
                                        IdentifierInfo *Name,
                                        SourceLocation NameLoc,
                                        const ParsedAttributesView &Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        Declarator &D);

  TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
      TemplateDecl *Template, SourceLocation TemplateLoc,
      SourceLocation RAngleLoc, Decl *Param,
      SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg);
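  // Illustrative sketch only (hypothetical names): the declaration forms
  // routed to the ActOnExplicitInstantiation overloads above.
  //
  //   template <typename T> struct X { void f() {} };
  //   template struct X<int>;         // explicit instantiation definition
  //   extern template struct X<long>; // explicit instantiation declaration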
  /// Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template,
                             SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
  /// contain the converted forms of the template arguments as written.
  /// Otherwise, \p TemplateArgs will not be modified.
  ///
  /// \param ConstraintsNotSatisfied If provided, and an error occurred, will
  /// receive true if the cause for the error is the associated constraints
  /// of the template not being satisfied by the template arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckTemplateArgumentList(TemplateDecl *Template,
                                 SourceLocation TemplateLoc,
                                 TemplateArgumentListInfo &TemplateArgs,
                                 bool PartialTemplateArgs,
                                 SmallVectorImpl<TemplateArgument> &Converted,
                                 bool UpdateArgsWithConversions = true,
                                 bool *ConstraintsNotSatisfied = nullptr);

  bool CheckTemplateTypeArgument(
      TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg,
      SmallVectorImpl<TemplateArgument> &Converted);

  bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                             TypeSourceInfo *Arg);
  ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                   QualType InstantiatedParamType, Expr *Arg,
                                   TemplateArgument &Converted,
                                   CheckTemplateArgumentKind CTAK = CTAK_Specified);
  bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
                                     TemplateParameterList *Params,
                                     TemplateArgumentLoc &Arg);

  ExprResult
  BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                          QualType ParamType,
                                          SourceLocation Loc);
  ExprResult
  BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                              SourceLocation Loc);

  /// Enumeration describing how template parameter lists are compared
  /// for equality.
  enum TemplateParameterListEqualKind {
    /// We are matching the template parameter lists of two templates
    /// that might be redeclarations.
    ///
    /// \code
    /// template<typename T> struct X;
    /// template<typename T> struct X;
    /// \endcode
    TPL_TemplateMatch,

    /// We are matching the template parameter lists of two template
    /// template parameters as part of matching the template parameter lists
    /// of two templates that might be redeclarations.
    ///
    /// \code
    /// template<template<int I> class TT> struct X;
    /// template<template<int Value> class Other> struct X;
    /// \endcode
    TPL_TemplateTemplateParmMatch,

    /// We are matching the template parameter lists of a template
    /// template argument against the template parameter lists of a template
    /// template parameter.
    ///
    /// \code
    /// template<template<int Value> class Metafun> struct X;
    /// template<int Value> struct integer_c;
    /// X<integer_c> xic;
    /// \endcode
    TPL_TemplateTemplateArgumentMatch
  };

  bool TemplateParameterListsAreEqual(
      TemplateParameterList *New, TemplateParameterList *Old, bool Complain,
      TemplateParameterListEqualKind Kind,
      SourceLocation TemplateArgLoc = SourceLocation());

  bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);

  /// Called when the parser has parsed a C++ typename
  /// specifier, e.g., "typename T::type".
  ///
  /// \param S The scope in which this typename type occurs.
  /// \param TypenameLoc the location of the 'typename' keyword
  /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
  /// \param II the identifier we're retrieving (e.g., 'type' in the example).
  /// \param IdLoc the location of the identifier.
  TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                               const CXXScopeSpec &SS,
                               const IdentifierInfo &II,
                               SourceLocation IdLoc);

  /// Called when the parser has parsed a C++ typename
  /// specifier that ends in a template-id, e.g.,
  /// "typename MetaFun::template apply<T1, T2>".
  ///
  /// \param S The scope in which this typename type occurs.
  /// \param TypenameLoc the location of the 'typename' keyword
  /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
  /// \param TemplateLoc the location of the 'template' keyword, if any.
  /// \param TemplateName The template name.
  /// \param TemplateII The identifier used to name the template.
  /// \param TemplateIILoc The location of the template name.
  /// \param LAngleLoc The location of the opening angle bracket ('<').
  /// \param TemplateArgs The template arguments.
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS, SourceLocation TemplateLoc,
                    TemplateTy TemplateName, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II, SourceLocation IILoc,
                             TypeSourceInfo **TSI, bool DeducedTSTContext);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II, SourceLocation IILoc,
                             bool DeducedTSTContext = true);

  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);

  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  bool RebuildTemplateParamsInCurrentInstantiation(
      TemplateParameterList *Params);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);
  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);

  //===--------------------------------------------------------------------===//
  // C++ Concepts
  //===--------------------------------------------------------------------===//
  Decl *ActOnConceptDefinition(Scope *S,
                               MultiTemplateParamsArg TemplateParameterLists,
                               IdentifierInfo *Name, SourceLocation NameLoc,
                               Expr *ConstraintExpr);

  RequiresExprBodyDecl *
  ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
                         ArrayRef<ParmVarDecl *> LocalParameters,
                         Scope *BodyScope);
  void ActOnFinishRequiresExpr();
  concepts::Requirement *ActOnSimpleRequirement(Expr *E);
  concepts::Requirement *ActOnTypeRequirement(
      SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
      IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
  concepts::Requirement *ActOnCompoundRequirement(Expr *E,
                                                  SourceLocation NoexceptLoc);
  concepts::Requirement *ActOnCompoundRequirement(
      Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
      TemplateIdAnnotation *TypeConstraint, unsigned Depth);
  concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
  concepts::ExprRequirement *BuildExprRequirement(
      Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
      concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
  concepts::ExprRequirement *BuildExprRequirement(
      concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
      bool IsSatisfied, SourceLocation NoexceptLoc,
      concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
  concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
  concepts::TypeRequirement *BuildTypeRequirement(
      concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
  concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
  concepts::NestedRequirement *BuildNestedRequirement(
      concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
  ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
                               RequiresExprBodyDecl *Body,
                               ArrayRef<ParmVarDecl *> LocalParameters,
                               ArrayRef<concepts::Requirement *> Requirements,
                               SourceLocation ClosingBraceLoc);
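  // Illustrative sketch only (hypothetical names; needs <concepts> and
  // <cstddef>): a requires-expression using the requirement kinds built
  // above: simple, type, compound and nested.
  //
  //   template <typename T>
  //   concept Container = requires(T t) {
  //     t.begin();                                          // simple
  //     typename T::value_type;                             // type
  //     { t.size() } noexcept -> std::same_as<std::size_t>; // compound
  //     requires sizeof(T) > 0;                             // nested
  //   };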
  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in
  /// this location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,

    /// The base type of a class type.
    UPPC_BaseType,

    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,

    /// The type of a data member.
    UPPC_DataMemberType,

    /// The size of a bit-field.
    UPPC_BitFieldWidth,

    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,

    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,

    /// The enumerator value.
    UPPC_EnumeratorValue,

    /// A using declaration.
    UPPC_UsingDeclaration,

    /// A friend declaration.
    UPPC_FriendDeclaration,

    /// A declaration qualifier.
    UPPC_DeclarationQualifier,

    /// An initializer.
    UPPC_Initializer,

    /// A default argument.
    UPPC_DefaultArgument,

    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,

    /// The type of an exception.
    UPPC_ExceptionType,

    /// Partial specialization.
    UPPC_PartialSpecialization,

    /// Microsoft __if_exists.
    UPPC_IfExists,

    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,

    /// Lambda expression.
    UPPC_Lambda,

    /// Block expression.
    UPPC_Block,

    /// A type constraint.
    UPPC_TypeConstraint,

    // A requirement in a requires-expression.
    UPPC_Requirement,

    // A requires-clause.
    UPPC_RequiresClause,
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(
      SourceLocation Loc, UnexpandedParameterPackContext UPPC,
      ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                       TypeSourceInfo *T,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given expression contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param E The expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(
      Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

  /// If the given requires-expression contains an unexpanded reference to
  /// one of its own parameter packs, diagnose the error.
  ///
  /// \param RE The requires-expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
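  // Illustrative sketch only (hypothetical names): the kind of unexpanded
  // pack these DiagnoseUnexpandedParameterPack* routines flag.
  //
  //   template <typename... Ts> struct Tuple {};
  //   template <typename... Ts> void f(Ts... ts) {
  //     Tuple<Ts...> ok;  // pack expanded with '...'
  //     // Tuple<Ts> bad; // error: unexpanded parameter pack 'Ts'
  //   }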
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
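/// For example (an illustrative sketch):
/// \code
/// template<typename ...Ts> void f(Ts ...args); // the parameter type
///                                              // 'Ts...' is a pack expansion
/// \endcode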
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
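/// A small worked example (illustrative): for
/// \code
/// template<typename ...Ts> struct S { std::tuple<Ts...> data; };
/// \endcode
/// instantiated as 'S<int, float>', the expansion 'Ts...' produces two
/// arguments.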
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
    const MultiLevelTemplateArgumentList &TemplateArgs);

/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// to determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);

/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
    TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis,
    Optional<unsigned> &NumExpansions) const;

/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);

//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//

/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// The deduced arguments did not satisfy the constraints associated
  /// with the template.
  TDK_ConstraintsNotSatisfied,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
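/// For example (an illustrative sketch):
/// \code
/// template<typename T> void h(const T &);
/// h(42); // records OriginalParamType = 'const T &', ArgIdx = 0,
///        // OriginalArgType = 'int'
/// \endcode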
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
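/// For instance (an illustrative sketch), given
/// \code
/// template<typename T> struct Pair { Pair(T, T); };
/// Pair p(1, 2); // deduces Pair<int> via the implicit guide
///               // 'template<typename T> Pair(T, T) -> Pair<T>'
/// \endcode
/// the guides are synthesized from Pair's constructors on first use.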
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
    DefaultTemplateArgumentInstantiation,
    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provide the template arguments as specified.
    DefaultFunctionArgumentInstantiation,
    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,
    /// We are substituting template arguments determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,
    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,
    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,
    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,
    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,
    /// We are instantiating a requirement of a requires expression.
    RequirementInstantiation,
    /// We are checking the satisfaction of a nested requirement of a requires
    /// expression.
    NestedRequirementConstraintsCheck,
    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,
    /// We are declaring an implicit 'operator==' for a defaulted
    /// 'operator<=>'.
    DeclaringImplicitEqualityComparison,
    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,
    /// We are checking the constraints associated with a constrained entity or
    /// the constraint expression of a concept. This includes the checks that
    /// atomic constraints have the type 'bool' and that they can be constant
    /// evaluated.
    ConstraintsCheck,
    /// We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,
    /// We are normalizing a constraint expression.
    ConstraintNormalization,
    /// We are substituting into the parameter mapping of an atomic constraint
    /// during normalization.
    ParameterMappingSubstitution,
    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,
    /// We are initializing a structured binding.
    InitializingStructuredBinding,
    /// We are marking a class as __dllexport.
    MarkingClassDllexported,
    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that causes
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema?
// It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
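  /// For example (an illustrative sketch):
  /// \code
  /// template<typename T, typename U> void f(U);
  /// f<int>(0.5); // 'T' comes from the explicit argument list;
  ///              // 'U' is deduced as 'double' from the call
  /// \endcode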
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
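  /// For example (an illustrative sketch, where 'A' and 'B' stand for
  /// arbitrary concepts), normalizing the constraint of
  /// \code
  /// template<typename T> concept Both = A<T> && B<T>;
  /// \endcode
  /// decomposes it into a conjunction of the normal forms of 'A' and 'B'.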
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum number of
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
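/// Unevaluated contexts include, for example, the operand of 'sizeof' or
/// 'decltype' (an illustrative sketch):
/// \code
/// int f();
/// decltype(f()) x; // 'f()' is parsed but never evaluated
/// \endcode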
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
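/// For example (an illustrative sketch):
/// \code
/// template<typename T> T twice(T v) { return v + v; }
/// int x = twice(3); // first use of 'twice<int>': queued here, then
///                   // instantiated when pending instantiations are performed
/// \endcode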
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
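/// For example (an illustrative sketch):
/// \code
/// struct S {
///   int v;
///   auto operator<=>(const S &) const = default; // implies 'operator=='
/// };
/// bool b = S{1} == S{1}; // uses the implicit 'operator=='
/// \endcode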
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool 
DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl 
*ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
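/// Illustrative examples of the kinds of types involved (a sketch, not
/// exhaustive):
/// \code
/// NSArray<NSString *> *a; // type arguments
/// id<NSCopying> c;        // protocol qualifiers
/// \endcode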
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
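/// For example (an illustrative sketch):
/// \code
/// [super init];      // ObjCSuperMessage
/// [someValue copy];  // ObjCInstanceMessage ('someValue' is a variable)
/// [NSString string]; // ObjCClassMessage ('NSString' names a class)
/// \endcode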
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
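// Illustrative example (added here; not part of the original interface): the
// kind of source construct the alignment-pragma callbacks above respond to.
// A minimal sketch; the struct name `Packed` is hypothetical.
//
//   #pragma pack(push, 1)               // handled by ActOnPragmaPack
//   struct Packed { char c; int i; };   // sizeof(Packed) == 5 under pack(1)
//   #pragma pack(pop)                   // restores the previous packing value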
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility. void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext.
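// Illustrative example (not from the original header): source constructs the
// floating-point and weak-symbol pragma callbacks above act on. A sketch;
// `fma_candidate`, `impl`, and `api` are hypothetical names.
//
//   #pragma STDC FP_CONTRACT ON           // routed to ActOnPragmaFPContract
//   double fma_candidate(double a, double b, double c) {
//     return a * b + c;                   // may be contracted to a single fma
//   }
//   void impl(void) {}
//   void api(void);
//   #pragma weak api = impl               // routed to ActOnPragmaWeakAlias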
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
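// Illustrative example (not part of the original docs): what the
// '#pragma clang attribute' and '#pragma clang optimize' callbacks above see.
// `slow_path` and `keep_debuggable` are hypothetical functions.
//
//   #pragma clang attribute push (__attribute__((noinline)), apply_to = function)
//   int slow_path(int x) { return x + 1; }  // attributed via AddPragmaAttributes
//   #pragma clang attribute pop
//
//   #pragma clang optimize off               // ActOnPragmaOptimize(false, ...)
//   void keep_debuggable(void) {}            // candidate for AddRangeBasedOptnone
//   #pragma clang optimize on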
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = std::string(Ext); } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. 
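// Illustrative example (not from the original header): a minimal C++20
// coroutine showing the expressions the coroutine Act*/Build* callbacks above
// correspond to. `FireAndForget` and `demo` are hypothetical names.
//
//   #include <coroutine>
//   struct FireAndForget {
//     struct promise_type {
//       FireAndForget get_return_object() { return {}; }
//       std::suspend_never initial_suspend() noexcept { return {}; }
//       std::suspend_never final_suspend() noexcept { return {}; }
//       void return_void() {}
//       void unhandled_exception() {}
//     };
//   };
//   FireAndForget demo() {
//     co_await std::suspend_never{};   // parsed via ActOnCoawaitExpr
//     co_return;                       // parsed via ActOnCoreturnStmt
//   }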
void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. SmallVector<SourceLocation, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. 
In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment? bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle an `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle an `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Tries to capture the lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D.
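// Illustrative example (not from the original header): an
// `omp begin/end declare variant` region of the sort the scope-tracking
// methods above manage. A sketch; `work` is a hypothetical function.
//
//   int work(void) { return 0; }    // base function
//   #pragma omp begin declare variant match(device = {kind(cpu)})
//   int work(void) { return 1; }    // renamed per the context selector and
//                                   // registered as a variant of the base
//   #pragma omp end declare variant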
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for which /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<StringRef> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner.
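// Illustrative example (not part of the original docs): directives handled by
// the threadprivate and declare-reduction callbacks above. `counter` and
// `mymax` are hypothetical names.
//
//   int counter = 0;
//   #pragma omp threadprivate(counter)   // ActOnOpenMPThreadprivateDirective
//
//   #pragma omp declare reduction(mymax : double : omp_out = omp_in > omp_out ? omp_in : omp_out) initializer(omp_priv = -1.0e308)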
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of a target region, i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of a target region, i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region.
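// Illustrative example (not from the original header): declare mapper and
// declare target regions as parsed by the callbacks above. `Vec` and
// `on_device` are hypothetical names.
//
//   struct Vec { int len; double *data; };
//   #pragma omp declare mapper(Vec v) map(v, v.data[0:v.len])
//
//   #pragma omp declare target       // ActOnStartOpenMPDeclareTargetDirective
//   int on_device(int x);            // noted via ActOnOpenMPDeclareTargetName
//   #pragma omp end declare target   // ActOnFinishOpenMPDeclareTargetDirective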
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. 
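// Illustrative example (not part of the original docs): several of the
// executable-directive callbacks in this section in context. A sketch; `bump`
// is a hypothetical function.
//
//   void bump(int *x) {
//     #pragma omp parallel           // ActOnOpenMPParallelDirective
//     #pragma omp single             // ActOnOpenMPSingleDirective
//     {
//       #pragma omp task             // ActOnOpenMPTaskDirective
//       {
//         #pragma omp atomic update  // ActOnOpenMPAtomicDirective
//         *x += 1;
//       }
//       #pragma omp taskwait         // ActOnOpenMPTaskwaitDirective
//     }
//   }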
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
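// Illustrative example (not from the original header): one of the combined
// target constructs above applied to a loop. `scale` is hypothetical.
//
//   void scale(double *a, int n) {
//     // ActOnOpenMPTargetTeamsDistributeParallelForDirective
//     #pragma omp target teams distribute parallel for map(tofrom: a[0:n])
//     for (int i = 0; i < n; ++i)
//       a[i] *= 2.0;
//   }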
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None if the function/variant function are not compatible with /// the pragma; otherwise, the pair of original function/variant ref expression. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause.
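// Illustrative example (not part of the original docs): a declare simd
// directive with the clauses checked by CheckOpenMPLinearDecl and friends.
// `kernel` is a hypothetical function.
//
//   #pragma omp declare simd simdlen(8) uniform(a) linear(i : 1)
//   double kernel(const double *a, int i);  // ActOnOpenMPDeclareSimdDirective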
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. 
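// Illustrative example (not from the original header): schedule, collapse, and
// proc_bind clauses on a worksharing loop. `sweep` is hypothetical.
//
//   void sweep(int n) {
//     // ActOnOpenMPScheduleClause, ActOnOpenMPCollapseClause, et al.
//     #pragma omp parallel for schedule(dynamic, 4) collapse(2) proc_bind(close)
//     for (int i = 0; i < n; ++i)
//       for (int j = 0; j < n; ++j)
//         ;  // loop body elided
//   }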
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause.
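// Illustrative example (not part of the original docs): an OpenMP 5.0 memory
// order clause on an atomic, as accepted by the callbacks above. `publish` is
// a hypothetical function.
//
//   void publish(int *flag, int *data) {
//     *data = 42;
//     #pragma omp atomic write release   // 'release' via ActOnOpenMPReleaseClause
//     *flag = 1;
//   }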
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. 
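// Illustrative example (not from the original header): data-sharing and
// reduction clauses from the list above. `sum_sq` is hypothetical.
//
//   double sum_sq(const double *v, int n) {
//     double s = 0.0;
//     // ActOnOpenMPReductionClause, ActOnOpenMPFirstprivateClause, et al.
//     #pragma omp parallel for reduction(+ : s) firstprivate(n) shared(v)
//     for (int i = 0; i < n; ++i)
//       s += v[i] * v[i];
//     return s;
//   }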
OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// The kind of conversion being performed. 
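/// As an illustration (sketch): given 'double d;', '(int)d' is classified as /// CCK_CStyleCast, 'int(d)' as CCK_FunctionalCast, 'static_cast<int>(d)' as /// CCK_OtherCast, and plain 'int i = d;' as CCK_ImplicitConversion.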
enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. 
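/// For example (sketch), passing an object of class type through '...' of a /// C variadic function is diagnosed here: /// /// std::string S; /// printf("%s\n", S); // non-trivial type passed to a variadic function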
void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects the argument expressions for the /// various forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointer types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. 
If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
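// For example (sketch): for 'F += I' with 'float F; int I;', the converted // type is 'float'; for the simple assignment 'F = I' it is a null QualType.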
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. 
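// (An unscoped enum nested in a struct yields qualified names such as // ReferenceConversions::DerivedToBase while its values still convert to // bool in conditions, which a C++11 'enum class' would not allow.)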
struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return The converted condition expression, or an error. ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. 
Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the expression is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. 
/// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are by default host device functions unless they have an /// explicit host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with the highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category.
PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
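/// For example (sketch), completion could be triggered at the marked point: /// /// struct Point P = { .x = 1, .<completion here> };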
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const 
TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); // Matrix builtin handling. 
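// For reference (sketch), these validate calls to the matrix-type extension // builtins, e.g. __builtin_matrix_transpose(M) and // __builtin_matrix_column_major_load(Ptr, Rows, Cols, Stride).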
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
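/// For example (a sketch modeled on the attribute documentation; names are /// illustrative), an MPI wrapper can register a tag per datatype so that /// mismatched buffer/datatype pairs are diagnosed: /// /// static const int mpi_int __attribute__(( /// type_tag_for_datatype(mpi, int))) = 0; /// void MPI_Send(void *buf, int count, int datatype) /// __attribute__((pointer_with_type_tag(mpi, 1, 3)));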
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Nullable_result = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for #include
  /// directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;
  bool isCFError(RecordDecl *D);

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// Determine the number of levels of enclosing template parameters. This is
  /// only usable while parsing. Note that this does not include dependent
  /// contexts in which no template parameters have yet been declared, such as
  /// in a terse function template or generic lambda before the first 'auto' is
  /// encountered.
  unsigned getTemplateDepth(Scope *S) const;

  /// To be used for checking whether the arguments being passed to a
  /// function exceed the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
  SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
  int ParsingClassDepth = 0;

  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;

    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;
    RecordDecl *RD;
    ValueDecl *MD;
    CharUnits Alignment;

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members and it is converted to some pointer type T with lower
  /// or equal alignment requirements. If so it removes it. This is used when
  /// we do not want to diagnose such misaligned access (e.g. in conversions to
  /// void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *,
                                       CharUnits)>
                   Action);

  /// Describes the reason a calling convention specification was ignored, used
  /// for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurLexicalContext is a kernel function or it is known that the
  ///   function will be emitted for the device, emits the diagnostics
  ///   immediately.
  /// - If CurLexicalContext is a function and we are compiling
  ///   for the device, but we don't know that this function will be codegen'ed
  ///   for device yet, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  /// Diagnose __float128 type usage only from SYCL device code if the current
  /// target doesn't support it
  /// if (!S.Context.getTargetInfo().hasFloat128Type() &&
  ///     S.getLangOpts().SYCLIsDevice)
  ///   SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
  SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
                                             unsigned DiagID);

  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed, creates a deferred diagnostic to be emitted if
  ///   and when the caller is codegen'ed, and returns true.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  ///
  /// Adds Callee to DeviceCallGraph if we don't know if its caller will be
  /// codegen'ed yet.
  bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
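/* Side sketch (standalone C, not LLVM code): the DenseMapInfo specialization
   above must provide getEmptyKey/getTombstoneKey because DenseMap is an
   open-addressing hash table. The toy table below illustrates why such
   sentinel keys exist: deletion marks a slot as a tombstone so later lookups
   keep probing past it. Keys here are non-negative ints; -1 and -2 stand in
   for the empty and tombstone sentinels. No resizing or full-table handling
   in this toy. */
#include <stdio.h>

#define CAP 8
#define EMPTY (-1)
#define TOMBSTONE (-2)

static int table[CAP] = {EMPTY, EMPTY, EMPTY, EMPTY,
                         EMPTY, EMPTY, EMPTY, EMPTY};

static void insert(int key)
{
    int i = key % CAP;
    while (table[i] >= 0)            /* probe past live keys */
        i = (i + 1) % CAP;
    table[i] = key;                  /* reuses empty or tombstone slots */
}

static int contains(int key)
{
    int i = key % CAP;
    while (table[i] != EMPTY) {      /* tombstones do NOT stop the probe */
        if (table[i] == key)
            return 1;
        i = (i + 1) % CAP;
    }
    return 0;
}

static void erase(int key)
{
    int i = key % CAP;
    while (table[i] != EMPTY) {
        if (table[i] == key) {
            table[i] = TOMBSTONE;    /* EMPTY here would cut probe chains */
            return;
        }
        i = (i + 1) % CAP;
    }
}

int main(void)
{
    insert(3);
    insert(11);                      /* collides with 3, lands one slot later */
    erase(3);
    printf("contains(11) = %d\n", contains(11)); /* 1, thanks to the tombstone */
    return 0;
}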
CostEvaluatorFull.h
/**********************************************************************************************************************
This file is part of the Control Toolbox (https://github.com/ethz-adrl/control-toolbox), copyright by ETH Zurich.
Licensed under the BSD-2 license (see LICENSE file in main directory)
**********************************************************************************************************************/

#pragma once

#include <omp.h>
#include <math.h>
#include <cmath>
#include <functional>

#include <ct/optcon/costfunction/CostFunctionQuadratic.hpp>
#include <ct/optcon/dms/dms_core/OptVectorDms.h>
#include <ct/optcon/dms/dms_core/ShotContainer.h>
#include <ct/optcon/nlp/DiscreteCostEvaluatorBase.h>

namespace ct {
namespace optcon {

/**
 * @ingroup    DMS
 *
 * @brief      Performs the full cost integration over the shots
 *
 * @tparam     STATE_DIM    The state dimension
 * @tparam     CONTROL_DIM  The input dimension
 */
template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR = double>
class CostEvaluatorFull : public tpl::DiscreteCostEvaluatorBase<SCALAR>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    typedef DmsDimensions<STATE_DIM, CONTROL_DIM, SCALAR> DIMENSIONS;
    typedef typename DIMENSIONS::state_vector_t state_vector_t;
    typedef typename DIMENSIONS::control_vector_t control_vector_t;
    typedef typename DIMENSIONS::state_vector_array_t state_vector_array_t;
    typedef typename DIMENSIONS::control_vector_array_t control_vector_array_t;
    typedef typename DIMENSIONS::time_array_t time_array_t;

    CostEvaluatorFull() = delete;

    /**
     * @brief      Custom constructor
     *
     * @param[in]  costFct         The cost function
     * @param[in]  w               The optimization vector
     * @param[in]  controlSpliner  The control spliner
     * @param[in]  shotInt         The vector of shot containers
     * @param[in]  settings        The dms settings
     */
    CostEvaluatorFull(std::shared_ptr<ct::optcon::CostFunctionQuadratic<STATE_DIM, CONTROL_DIM, SCALAR>> costFct,
        std::shared_ptr<OptVectorDms<STATE_DIM, CONTROL_DIM, SCALAR>> w,
        std::shared_ptr<SplinerBase<control_vector_t, SCALAR>> controlSpliner,
        std::vector<std::shared_ptr<ShotContainer<STATE_DIM, CONTROL_DIM, SCALAR>>> shotInt,
        DmsSettings settings)
        : costFct_(costFct),
          w_(w),
          controlSpliner_(controlSpliner),
          shotContainers_(shotInt),
          settings_(settings)
    {
    }

    /**
     * @brief      The destructor.
*/ ~CostEvaluatorFull() override = default; SCALAR eval() override { SCALAR cost = SCALAR(0.0); #pragma omp parallel for num_threads(settings_.nThreads_) for (auto shotContainer = shotContainers_.begin(); shotContainer < shotContainers_.end(); ++shotContainer) { (*shotContainer)->integrateCost(); } for (auto shotContainer : shotContainers_) cost += shotContainer->getCostIntegrated(); costFct_->setCurrentStateAndControl(w_->getOptimizedState(settings_.N_), control_vector_t::Zero()); cost += costFct_->evaluateTerminal(); return cost; } void evalGradient(size_t grad_length, Eigen::Map<Eigen::Matrix<SCALAR, Eigen::Dynamic, 1>>& grad) override { grad.setZero(); assert(shotContainers_.size() == settings_.N_); // go through all shots, integrate the state trajectories and evaluate cost accordingly // intermediate costs #pragma omp parallel for num_threads(settings_.nThreads_) for (auto shotContainer = shotContainers_.begin(); shotContainer < shotContainers_.end(); ++shotContainer) { (*shotContainer)->integrateCostSensitivities(); } for (size_t shotNr = 0; shotNr < shotContainers_.size(); ++shotNr) { switch (settings_.splineType_) { case DmsSettings::ZERO_ORDER_HOLD: { grad.segment(w_->getStateIndex(shotNr), STATE_DIM) += shotContainers_[shotNr]->getdLdSiIntegrated(); grad.segment(w_->getControlIndex(shotNr), CONTROL_DIM) += shotContainers_[shotNr]->getdLdQiIntegrated(); break; } case DmsSettings::PIECEWISE_LINEAR: { grad.segment(w_->getStateIndex(shotNr), STATE_DIM) += shotContainers_[shotNr]->getdLdSiIntegrated(); grad.segment(w_->getControlIndex(shotNr), CONTROL_DIM) += shotContainers_[shotNr]->getdLdQiIntegrated(); grad.segment(w_->getControlIndex(shotNr + 1), CONTROL_DIM) += shotContainers_[shotNr]->getdLdQip1Integrated(); break; } default: throw(std::runtime_error( " cost gradient not yet implemented for this type of interpolation. Exiting")); } // H-part. // if(settings_.objectiveType_ == DmsSettings::OPTIMIZE_GRID) // { // costFct_->setCurrentStateAndControl(shotContainers_[shotNr]->getStateIntegrated(), // controlSpliner_->evalSpline(shotContainers_[shotNr]->getIntegrationTimeFinal(), shotNr)); // grad(w_->getTimeSegmentIndex(shotNr)) = costFct_->evaluateIntermediate() + shotContainers_[shotNr]->getdLdHiIntegrated(); // } } /* gradient of terminal cost */ costFct_->setCurrentStateAndControl(w_->getOptimizedState(settings_.N_), control_vector_t::Zero()); grad.segment(w_->getStateIndex(settings_.N_), STATE_DIM) += costFct_->stateDerivativeTerminal(); // * dXdSi.back(); } private: std::shared_ptr<ct::optcon::CostFunctionQuadratic<STATE_DIM, CONTROL_DIM, SCALAR>> costFct_; std::shared_ptr<OptVectorDms<STATE_DIM, CONTROL_DIM, SCALAR>> w_; std::shared_ptr<SplinerBase<control_vector_t, SCALAR>> controlSpliner_; std::vector<std::shared_ptr<ShotContainer<STATE_DIM, CONTROL_DIM, SCALAR>>> shotContainers_; const DmsSettings settings_; }; } // namespace optcon } // namespace ct
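/* A minimal standalone sketch (mine, not part of the Control Toolbox) of the
   pattern eval() above relies on: do the independent per-shot integration in
   parallel, then accumulate the results serially so the sum itself never
   races. The per-shot "integration" below is an invented stand-in. Compile
   with -fopenmp. */
#include <stdio.h>

#define N_SHOTS 8

int main(void)
{
    double shot_cost[N_SHOTS];

    /* Phase 1: independent per-shot work runs in parallel. */
    #pragma omp parallel for
    for (int s = 0; s < N_SHOTS; ++s) {
        double acc = 0.0;
        for (int k = 0; k < 1000; ++k)   /* stand-in for cost integration */
            acc += (s + 1) * 1e-3;
        shot_cost[s] = acc;
    }

    /* Phase 2: accumulate serially, exactly as eval() does after its
       parallel loop, so no reduction clause is needed. */
    double cost = 0.0;
    for (int s = 0; s < N_SHOTS; ++s)
        cost += shot_cost[s];

    printf("total cost = %f\n", cost);
    return 0;
}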
main.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"

#include "functions.h"

int main (int argc, char **argv) {

  int Nthreads = 12;
  omp_set_num_threads(Nthreads);

  //seed value for the randomizer
  double seed = clock(); //this will make your program run differently every time
  //double seed = 0; //uncomment this and your program will behave the same every time it's run
  srand((unsigned int) seed); //srand expects an unsigned int

  //declare storage for an ElGamal cryptosystem
  unsigned int p, g, h, x;

  //begin with rank 0 getting user's input
  unsigned int n;
  printf("Enter a number of bits: ");
  fflush(stdout);
  int status = scanf("%u",&n); //scanf returns the number of items successfully read

  //make sure the input makes sense
  if ((status!=1)||(n<9)||(n>31)) { //Updated bounds. 8 is no good (need to encode chars)
    printf("Unsupported bit size.\n");
    return 0;
  }
  printf("\n");

  //setup an ElGamal cryptosystem
  setupElGamal(n,&p,&g,&h,&x);

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char));

  //populate the string with a message
  strcpy((char *) message, "Hello, this is the message as a string.");
  printf("Message = \"%s\"\n", message);

  /* Q1.1 Finish this line */
  unsigned int charsPerInt = (n - 1) / 8;

  padString(message, charsPerInt);
  printf("Padded Message = \"%s\"\n", message);

  unsigned int Nchars = strlen((char *) message);
  unsigned int Nints  = strlen((char *) message)/charsPerInt;

  //storage for message as elements of Z_p
  unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int));

  //storage for extra encryption coefficient
  unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int));

  // cast the string into an unsigned int array
  convertStringToZ(message, Nchars, Zmessage, Nints);

  //Encrypt the Zmessage with the ElGamal cryptographic system
  ElGamalEncrypt(Zmessage,a,Nints,p,g,h);

  printf("The encrypted text is: ");
  for (unsigned int i=0;i<Nints;i++) {
    printf("(%u,%u) ", Zmessage[i], a[i]);
  }
  printf("\n");

  /* Q.Bonus */
  unsigned char *S = (unsigned char *) malloc(bufferSize * sizeof(unsigned char));
  convertEncryptToString(Zmessage, a, Nints, charsPerInt, S);
  printf("Convert the encrypted text to a string = \"%s\"\n", S);

  //Decrypt the Zmessage with the ElGamal cryptographic system
  ElGamalDecrypt(Zmessage,a,Nints,p,x);

  convertZToString(Zmessage, Nints, message, Nchars);
  printf("Decrypted Message = \"%s\"\n", message);
  printf("\n");

  //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel
  printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);

  /* Q2.3 Parallelize this loop with OpenMP */
  double startTime = omp_get_wtime();
  unsigned int found = 0; //must be initialized before the threads read it
  #pragma omp parallel shared(found)
  {
    #pragma omp for
    for (unsigned int i=0;i<p-1;i++) {
      //best-effort early exit: the unsynchronized read only skips work
      if (found == 0) {
        if (modExp(g,i+1,p)==h) {
          printf("Secret key found! x = %u \n", i + 1);
          #pragma omp critical
          {
            found = 1;
          }
        }
      }
    }
  }
  double endTime = omp_get_wtime();

  double totalTime = endTime-startTime;
  double work = (double) p;
  double throughput = work/totalTime;

  printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n",
         totalTime, throughput);

  return 0;
}
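/* Hedged sketch (mine, not part of the assignment above): the flag-based
   loop in main() cannot break out of an OpenMP for, so every key is still
   tested even after a hit. OpenMP 4.0 cancellation gives a true early exit;
   it only takes effect when the program runs with OMP_CANCELLATION=true.
   The modExp below is a plain square-and-multiply reimplementation, not
   necessarily the one in functions.h, and p/g/secret are toy values. */
#include <stdio.h>
#include <omp.h>

static unsigned int modExp(unsigned int base, unsigned int exp, unsigned int p)
{
    unsigned long long result = 1, b = base % p;
    while (exp > 0) {
        if (exp & 1u) result = (result * b) % p;
        b = (b * b) % p;
        exp >>= 1;
    }
    return (unsigned int)result;
}

int main(void)
{
    const unsigned int p = 104729, g = 5;   /* toy prime and base */
    const unsigned int secret = 77777;
    const unsigned int h = modExp(g, secret, p);
    unsigned int x = 0;

    #pragma omp parallel
    {
        #pragma omp for
        for (unsigned int i = 0; i < p - 1; i++) {
            if (modExp(g, i + 1, p) == h) {
                #pragma omp atomic write
                x = i + 1;
                #pragma omp cancel for           /* request early termination */
            }
            #pragma omp cancellation point for   /* where threads notice it */
        }
    }
    /* any x with g^x == h (mod p) is a valid answer */
    printf("recovered x = %u (secret was %u)\n", x, secret);
    return 0;
}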
sparse_to_mat.c
#include "main.h" mat_rv coo_to_mat_nothreading(coo matrix) { mat_rv rv; struct timespec start, end; get_utc_time(&start); rv.error = ERR_NONE; rv.rows = matrix.rows; rv.cols = matrix.cols; rv.type = matrix.type; rv.isval = false; if(matrix.type == MAT_INT){ if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){ fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n"); exit(EXIT_FAILURE); } } else{ if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){ fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n"); exit(EXIT_FAILURE); } } for(int i = 0; i < matrix.length; ++i){ if(!(matrix.elems[i].i < rv.rows && matrix.elems[i].j < rv.cols)){ rv.error = ERR_DIM_MISSMATCH; break; } if(rv.type == MAT_INT){ if (rv.vals.i[matrix.elems[i].i * rv.cols + matrix.elems[i].j] != 0){ rv.error = ERR_DUPLICATE; break; } rv.vals.i[matrix.elems[i].i * rv.cols + matrix.elems[i].j] = matrix.elems[i].val.i; } else{ if (rv.vals.f[matrix.elems[i].i * rv.cols + matrix.elems[i].j] != 0.0){ rv.error = ERR_DUPLICATE; break; } rv.vals.f[matrix.elems[i].i * rv.cols + matrix.elems[i].j] = matrix.elems[i].val.f; } } get_utc_time(&end); rv.t_construct = time_delta(end, start); return rv; } mat_rv coo_to_mat(coo matrix, int thread_count) { mat_rv rv; struct timespec start, end; get_utc_time(&start); rv.error = ERR_NONE; rv.rows = matrix.rows; rv.cols = matrix.cols; rv.type = matrix.type; rv.isval = false; if(matrix.type == MAT_INT){ if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){ fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n"); exit(EXIT_FAILURE); } } else{ if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){ fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n"); exit(EXIT_FAILURE); } } int i; #pragma omp parallel num_threads(thread_count) shared(matrix, rv) { MAT_TYPE local_type = matrix.type; #pragma omp for private(i) for(i = 0; i < matrix.length; ++i){ if(!(matrix.elems[i].i < rv.rows && matrix.elems[i].j < rv.cols)) rv.error = ERR_DIM_MISSMATCH; if(local_type == MAT_INT){ if(rv.vals.i[matrix.elems[i].i * rv.cols + matrix.elems[i].j] != 0) rv.error = ERR_DUPLICATE; rv.vals.i[matrix.elems[i].i * rv.cols + matrix.elems[i].j] = matrix.elems[i].val.i; } else{ if (rv.vals.f[matrix.elems[i].i * rv.cols + matrix.elems[i].j] != 0.0) rv.error = ERR_DUPLICATE; rv.vals.f[matrix.elems[i].i * rv.cols + matrix.elems[i].j] = matrix.elems[i].val.f; } } } get_utc_time(&end); rv.t_construct = time_delta(end, start); return rv; } mat_rv csr_to_mat_nothreading(csr matrix) { mat_rv rv; struct timespec start, end; get_utc_time(&start); rv.error = ERR_NONE; rv.rows = matrix.rows; rv.cols = matrix.cols; rv.type = matrix.type; rv.isval = false; if(matrix.type == MAT_INT){ if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){ fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n"); exit(EXIT_FAILURE); } } else{ if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){ fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n"); exit(EXIT_FAILURE); } } for(int i = 0; i < rv.rows; ++i){ //iterator for nnz/ja int a_i = matrix.ia[i]; for(int j = 0; j < rv.cols; ++j){ if(a_i < matrix.ia[i + 1]){ if(matrix.ja[a_i] == j){ if(rv.type == MAT_INT) rv.vals.i[i*rv.cols + j] = matrix.nnz.i[a_i]; else rv.vals.f[i*rv.cols + j] = matrix.nnz.f[a_i]; a_i++; continue; } } if(rv.type == MAT_INT) 
rv.vals.i[i*rv.cols + j] = 0;
            else
                rv.vals.f[i*rv.cols + j] = 0.0;
        }
        if(a_i != matrix.ia[i + 1]){
            rv.error = ERR_DIM_MISSMATCH;
            break;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}

mat_rv csr_to_mat(csr matrix, int thread_count)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.rows = matrix.rows;
    rv.cols = matrix.cols;
    rv.type = matrix.type;
    rv.isval = false;
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    int i;
    #pragma omp parallel num_threads(thread_count) shared(rv, matrix)
    {
        MAT_TYPE local_type = rv.type;
        #pragma omp for private(i)
        for(i = 0; i < rv.rows; ++i){
            int a_i = matrix.ia[i];
            for(int j = 0; j < rv.cols; ++j){
                if(a_i < matrix.ia[i + 1]){
                    if(matrix.ja[a_i] == j){
                        if(local_type == MAT_INT)
                            rv.vals.i[i*rv.cols + j] = matrix.nnz.i[a_i];
                        else
                            rv.vals.f[i*rv.cols + j] = matrix.nnz.f[a_i];
                        a_i++;
                        continue;
                    }
                }
                if(local_type == MAT_INT)
                    rv.vals.i[i*rv.cols + j] = 0;
                else
                    rv.vals.f[i*rv.cols + j] = 0.0;
            }
            if(a_i != matrix.ia[i + 1])
                rv.error = ERR_DIM_MISSMATCH;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}

mat_rv csc_to_mat_nothreading(csc matrix)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.rows = matrix.rows;
    rv.cols = matrix.cols;
    rv.type = matrix.type;
    rv.isval = false;
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    for(int i = 0; i < rv.cols; ++i){
        //iterator for nnz/ja
        int a_i = matrix.ia[i];
        for(int j = 0; j < rv.rows; ++j){
            if(a_i < matrix.ia[i + 1]){
                if(matrix.ja[a_i] == j){
                    if(rv.type == MAT_INT)
                        rv.vals.i[j*rv.cols + i] = matrix.nnz.i[a_i];
                    else
                        rv.vals.f[j*rv.cols + i] = matrix.nnz.f[a_i];
                    a_i++;
                    continue;
                }
            }
            if(rv.type == MAT_INT)
                rv.vals.i[j*rv.cols + i] = 0;
            else
                rv.vals.f[j*rv.cols + i] = 0.0;
        }
        if(a_i != matrix.ia[i + 1]){
            rv.error = ERR_DIM_MISSMATCH;
            break;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}

mat_rv csc_to_mat(csc matrix, int thread_count)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.cols = matrix.cols;
    rv.rows = matrix.rows;
    rv.type = matrix.type;
    rv.isval = false;
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    int i;
    #pragma omp parallel num_threads(thread_count) shared(rv, matrix)
    {
        MAT_TYPE local_type = rv.type;
        #pragma omp for private(i)
        for(i = 0; i < rv.cols; ++i){
            int a_i = matrix.ia[i];
            for(int j = 0; j < rv.rows; ++j){
                if(a_i < matrix.ia[i + 1]){
                    if(matrix.ja[a_i] == j){
                        //row-major index (j*cols + i), consistent with csc_to_mat_nothreading
                        if(local_type == MAT_INT)
                            rv.vals.i[j*rv.cols + i] = matrix.nnz.i[a_i];
                        else
                            rv.vals.f[j*rv.cols + i] = matrix.nnz.f[a_i];
                        a_i++;
                        continue;
                    }
                }
                if(local_type == MAT_INT)
                    rv.vals.i[j*rv.cols + i] = 0;
                else
                    rv.vals.f[j*rv.cols + i] = 0.0;
            }
            if(a_i != matrix.ia[i + 1])
                rv.error = ERR_DIM_MISSMATCH;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}
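/* Hedged alternative (mine, not part of main.h) to the shared rv.error
   writes in the parallel converters above: when several threads may flag an
   error in the same field, an OpenMP max reduction collects the flags with
   no write-write race. The error codes below are invented stand-ins for the
   ERR_* values; all that matters is that "no error" is the smallest value.
   Compile with -fopenmp. */
#include <stdio.h>

enum { DEMO_ERR_NONE = 0, DEMO_ERR_DIM = 1, DEMO_ERR_DUP = 2 };

int main(void)
{
    int data[8] = {1, 2, 3, -1, 5, 6, 3, 8};   /* -1 plays a bad coordinate */
    int err = DEMO_ERR_NONE;

    #pragma omp parallel for reduction(max:err)
    for (int i = 0; i < 8; ++i) {
        if (data[i] < 0)
            err = DEMO_ERR_DIM;                /* each thread flags its own copy */
    }

    if (err != DEMO_ERR_NONE)
        printf("conversion failed with code %d\n", err);
    else
        printf("conversion ok\n");
    return 0;
}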
dtrmm.c
#include "blas.h" #include "error.h" #include <stdio.h> #include "handle.h" #include "config.h" #include "dtrmm.fatbin.c" static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; } static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; } static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj, const void * B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize, m * elemSize, n }; return cuMemcpy2DAsync(&copy, stream); } static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj, CUdeviceptr B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize, m * elemSize, n }; return cuMemcpy2DAsync(&copy, stream); } static inline CUresult cuMemcpyDtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj, CUdeviceptr B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize, m * elemSize, n }; return cuMemcpy2DAsync(&copy, stream); } static const double zero = 0.0; static const double one = 1.0; void dtrmm(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag, size_t m, size_t n, double alpha, const double * restrict A, size_t lda, double * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? 
m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return; } if (m == 0 || n == 0) return; if (alpha == zero) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) B[j * ldb + i] = zero; } return; } if (side == CBlasLeft) { if (trans == CBlasNoTrans) { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t k = 0; k < m; k++) { if (B[j * ldb + k] != zero) { register double temp = alpha * B[j * ldb + k]; for (size_t i = 0; i < k; i++) B[j * ldb + i] += temp * A[k * lda + i]; if (diag == CBlasNonUnit) temp *= A[k * lda + k]; B[j * ldb + k] = temp; } } } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { size_t k = m - 1; do { if (B[j * ldb + k] != zero) { register double temp = alpha * B[j * ldb + k]; B[j * ldb + k] = temp; if (diag == CBlasNonUnit) B[j * ldb + k] *= A[k * lda + k]; for (size_t i = k + 1; i < m; i++) B[j * ldb + i] += temp * A[k * lda + i]; } } while (k-- > 0); } } } else { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { size_t i = m - 1; do { register double temp = B[j * ldb + i]; if (diag == CBlasNonUnit) temp *= A[i * lda + i]; for (size_t k = 0; k < i; k++) temp += A[i * lda + k] * B[j * ldb + k]; B[j * ldb + i] = alpha * temp; } while (i-- > 0); } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double temp = B[j * ldb + i]; if (diag == CBlasNonUnit) temp *= A[i * lda + i]; for (size_t k = i + 1; k < m; k++) temp += A[i * lda + k] * B[j * ldb + k]; B[j * ldb + i] = alpha * temp; } } } } } else { if (trans == CBlasNoTrans) { if (uplo == CBlasUpper) { size_t j = n - 1; do { register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; for (size_t k = 0; k < j; k++) { if (A[j * lda + k] != zero) { register double temp = alpha * A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] += temp * B[k * ldb + i]; } } } while (j-- > 0); } else { for (size_t j = 0; j < n; j++) { register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[j * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] *= temp; for (size_t k = j + 1; k < n; k++) { if (A[j * lda + k] != zero) { register double temp = alpha * A[j * lda + k]; for (size_t i = 0; i < m; i++) B[j * ldb + i] += temp * B[k * ldb + i]; } } } } } else { if (uplo == CBlasUpper) { for (size_t k = 0; k < n; k++) { for (size_t j = 0; j < k; j++) { if (A[k * lda + j] != zero) { register double temp = alpha * A[k * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] += temp * B[k * ldb + i]; } } register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[k * lda + k]; if (temp != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] = temp * B[k * ldb + i]; } } } else { size_t k = n - 1; do { for (size_t j = k + 1; j < n; j++) { if (A[k * lda + j] != zero) { register double temp = alpha * A[k * lda + j]; for (size_t i = 0; i < m; i++) B[j * ldb + i] += temp * B[k * ldb + i]; } } register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[k * lda + k]; if (temp != one) { for (size_t i = 0; i < m; i++) B[k * ldb + i] = temp * B[k * ldb + i]; } } while (k-- > 0); } } } } void dtrmm2(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag, size_t m, size_t n, double alpha, const double * restrict A, size_t lda, const double * restrict B, size_t ldb, double * restrict X, size_t ldx) { 
const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; else if (ldx < m) info = 13; if (info != 0) { XERBLA(info); return; } if (m == 0 || n == 0) return; if (alpha == zero) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) X[j * ldx + i] = zero; } return; } if (side == CBlasLeft) { if (trans == CBlasNoTrans) { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t k = 0; k < m; k++) { register double temp = B[j * ldb + k]; if (temp != zero) { temp *= alpha; for (size_t i = 0; i < k; i++) X[j * ldx + i] += temp * A[k * lda + i]; if (diag == CBlasNonUnit) temp *= A[k * lda + k]; } X[j * ldx + k] = temp; } } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { size_t k = m - 1; do { if (B[j * ldb + k] != zero) { register double temp = alpha * B[j * ldb + k]; X[j * ldx + k] = temp; if (diag == CBlasNonUnit) X[j * ldx + k] *= A[k * lda + k]; for (size_t i = k + 1; i < m; i++) X[j * ldx + i] += temp * A[k * lda + i]; } else X[j * ldx + k] = B[j * ldb + k]; } while (k-- > 0); } } } else { if (uplo == CBlasUpper) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { size_t i = m - 1; do { register double temp = B[j * ldb + i]; if (diag == CBlasNonUnit) temp *= A[i * lda + i]; for (size_t k = 0; k < i; k++) temp += A[i * lda + k] * B[j * ldb + k]; X[j * ldx + i] = alpha * temp; } while (i-- > 0); } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double temp = B[j * ldb + i]; if (diag == CBlasNonUnit) temp *= A[i * lda + i]; for (size_t k = i + 1; k < m; k++) temp += A[i * lda + k] * B[j * ldb + k]; X[j * ldx + i] = alpha * temp; } } } } } else { if (trans == CBlasNoTrans) { if (uplo == CBlasUpper) { size_t j = n - 1; do { register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[j * lda + j]; for (size_t i = 0; i < m; i++) X[j * ldx + i] = temp * B[j * ldb + i]; for (size_t k = 0; k < j; k++) { if (A[j * lda + k] != zero) { register double temp = alpha * A[j * lda + k]; for (size_t i = 0; i < m; i++) X[j * ldx + i] += temp * B[k * ldb + i]; } } } while (j-- > 0); } else { for (size_t j = 0; j < n; j++) { register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[j * lda + j]; for (size_t i = 0; i < m; i++) X[j * ldx + i] = temp * B[j * ldb + i]; for (size_t k = j + 1; k < n; k++) { if (A[j * lda + k] != zero) { register double temp = alpha * A[j * lda + k]; for (size_t i = 0; i < m; i++) X[j * ldx + i] += temp * B[k * ldb + i]; } } } } } else { if (uplo == CBlasUpper) { for (size_t k = 0; k < n; k++) { for (size_t j = 0; j < k; j++) { if (A[k * lda + j] != zero) { register double temp = alpha * A[k * lda + j]; for (size_t i = 0; i < m; i++) X[j * ldx + i] += temp * B[k * ldb + i]; } } register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[k * lda + k]; if (temp != one) { for (size_t i = 0; i < m; i++) X[k * ldx + i] = temp * B[k * ldb + i]; } } } else { size_t k = n - 1; do { for (size_t j = k + 1; j < n; j++) { if (A[k * lda + j] != zero) { register double temp = alpha * A[k * lda + j]; for (size_t i = 0; i < m; i++) X[j * ldx + i] += temp * B[k * ldb + i]; } } register double temp = alpha; if (diag == CBlasNonUnit) temp *= A[k * lda + k]; if (temp != one) { for (size_t i = 0; i < m; i++) X[k * ldx + i] = temp * B[k * ldb + i]; } } while (k-- > 0); } } } } CUresult cuDtrmm2(CUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, 
CBlasDiag diag, size_t m, size_t n, double alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb, CUdeviceptr X, size_t ldx, CUstream stream) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; else if (ldx < m) info = 13; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; CU_ERROR_CHECK(cuCtxPushCurrent(handle->context)); if (handle->dtrmm2 == NULL) CU_ERROR_CHECK(cuModuleLoadData(&handle->dtrmm2, imageBytes)); const unsigned int mb = (side == CBlasRight) ? 64 : (trans == CBlasNoTrans) ? 64 : 32; const unsigned int nb = (side == CBlasRight) ? 8 : (trans == CBlasNoTrans) ? 8 : 16; const unsigned int kb = (side == CBlasRight) ? 8 : (trans == CBlasNoTrans) ? 16 : 8; const unsigned int bx = (side == CBlasRight) ? 8 : (trans == CBlasNoTrans) ? 16 : 8; const unsigned int by = (side == CBlasRight) ? 8 : (trans == CBlasNoTrans) ? 4 : 8; char name[67]; snprintf(name, 67, "_Z8dtrmm%c%c%cIL9CBlasDiag%dELj%uELj%uELj%uELj%uELj%uEEvPKdS2_Pddiiiii", side, uplo, trans, diag, mb, nb, kb, bx, by); CUfunction function; CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->dtrmm2, name)); void * params[] = { &A, &B, &X, &alpha, &lda, &ldb, &ldx, &m, &n }; CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1, bx, by, 1, 0, stream, params, NULL)); CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context)); return CUDA_SUCCESS; } CUresult cuDtrmm(CUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag, size_t m, size_t n, double alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb, CUstream stream) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; CU_ERROR_CHECK(cuCtxPushCurrent(handle->context)); CUdeviceptr X; size_t ldx; CU_ERROR_CHECK(cuMemAllocPitch(&X, &ldx, m * sizeof(double), n, sizeof(double))); ldx /= sizeof(double); CU_ERROR_CHECK(cuDtrmm2(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, X, ldx, stream)); CU_ERROR_CHECK(cuMemcpyDtoD2DAsync(B, ldb, 0, 0, X, ldx, 0, 0, m, n, sizeof(double), stream)); CU_ERROR_CHECK(cuMemFree(X)); CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context)); return CUDA_SUCCESS; } CUresult cuMultiGPUDtrmm(CUmultiGPUBLAShandle handle, CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag, size_t m, size_t n, double alpha, const double * restrict A, size_t lda, double * restrict B, size_t ldb) { const size_t nRowA = (side == CBlasLeft) ? m : n; int info = 0; if (lda < nRowA) info = 9; else if (ldb < m) info = 11; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0) return CUDA_SUCCESS; if (alpha == zero) { dgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb); return CUDA_SUCCESS; } const size_t mb = (trans == CBlasNoTrans) ? 
DGEMM_N_MB : DGEMM_T_MB; const size_t nb = DGEMM_N_NB; if (m <= mb || n <= nb) { dtrmm(side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb); return CUDA_SUCCESS; } if (side == CBlasLeft) { if (trans == CBlasNoTrans) { if (uplo == CBlasUpper) { size_t i = (m + mb - 1) & ~(mb - 1); do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } else { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } } else { if (uplo == CBlasUpper) { for (size_t i = 0; i < m; i += mb) { const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasLeft, CBlasUpper, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } } else { size_t i = (m + mb - 1) & ~(mb - 1); do { i -= mb; const size_t ib = min(mb, m - i); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasLeft, CBlasLower, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb); } while (i > 0); } } } else { if (trans == CBlasNoTrans) { if (uplo == CBlasUpper) { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } else { size_t j = (n + nb - 1) & ~(nb - 1); do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } } else { if (uplo == CBlasUpper) { size_t j = (n + nb - 1) & ~(nb - 1); do { j -= nb; const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasRight, CBlasUpper, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } while (j > 0); } else { for (size_t j = 0; j < n; j += nb) { const size_t jb = min(nb, n - j); CU_ERROR_CHECK(cuMultiGPUDgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb)); CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle)); dtrmm(CBlasRight, CBlasLower, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb); } } } } return CUDA_SUCCESS; }
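/* Small self-contained check (mine, independent of blas.h above) of what the
   left/upper/non-transposed dtrmm path computes: B := alpha*A*B in place
   with A upper triangular, using column-major storage as the code above
   does (element (i,j) lives at j*ld + i). The naive triple loop is the
   reference; the in-place loop mirrors the library's update order, where
   each B[k] is read before step k overwrites it. */
#include <stdio.h>

int main(void)
{
    enum { N = 3 };
    const double alpha = 2.0;
    /* column-major upper-triangular A and a 3x1 B */
    double A[N * N] = {1, 0, 0,  2, 3, 0,  4, 5, 6};
    double B[N] = {1, 1, 1};
    double ref[N] = {0};

    /* reference: ref = alpha * A * B, touching only the upper triangle */
    for (int i = 0; i < N; i++)
        for (int k = i; k < N; k++)
            ref[i] += alpha * A[k * N + i] * B[k];

    /* in-place update in the same order as dtrmm's upper/no-trans branch */
    for (int k = 0; k < N; k++) {
        double temp = alpha * B[k];
        for (int i = 0; i < k; i++)
            B[i] += temp * A[k * N + i];
        B[k] = temp * A[k * N + k];      /* non-unit diagonal case */
    }

    for (int i = 0; i < N; i++)
        printf("B[%d] = %g (ref %g)\n", i, B[i], ref[i]);
    return 0;
}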
GB_unaryop__abs_uint16_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint16_int8 // op(A') function: GB_tran__abs_uint16_int8 // C type: uint16_t // A type: int8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint16_int8 ( uint16_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint16_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
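/* Illustration (standalone, not generated GraphBLAS code): after the macros
   above expand, GB_unop__abs_uint16_int8 is just a typecasting copy, since
   GB_OP is the identity (ABS on an unsigned target changes nothing). This is
   the whole kernel with the macro layers peeled away. Compile with -fopenmp. */
#include <stdio.h>
#include <stdint.h>

static void unop_abs_uint16_int8(uint16_t *Cx, const int8_t *Ax,
                                 int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        int8_t aij = Ax[p];              /* GB_GETA */
        uint16_t x = (uint16_t)aij;      /* GB_CASTING */
        Cx[p] = x;                       /* GB_OP: identity */
    }
}

int main(void)
{
    int8_t a[4] = {-1, 0, 1, -128};
    uint16_t c[4];
    unop_abs_uint16_int8(c, a, 4, 2);
    for (int p = 0; p < 4; p++)
        printf("%d -> %u\n", a[p], (unsigned)c[p]);
    return 0;
}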
DRB088-dynamic-storage-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* For the case of a variable which is not referenced within a construct: objects with dynamic storage duration should be shared. Putting it within a threadprivate directive may cause seg fault since threadprivate copies are not allocated! Dependence pair: *counter@63:6 vs. *counter@63:6 */ #include "omprace.h" #include <omp.h> #include<stdio.h> #include<stdlib.h> int* counter; void foo() { (*counter)++; } int main() { omprace_init(); counter = (int*) malloc(sizeof(int)); if (counter== NULL) { fprintf(stderr, "malloc() failes\n"); exit(1); } *counter = 0; #pragma omp parallel { foo(); } printf("%d \n", *counter); free (counter); omprace_fini(); return 0; }
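/* Contrast sketch (mine, not part of DataRaceBench): the benchmark above
   deliberately keeps the documented race on *counter. Sharing the
   heap-allocated object is correct, as its comment explains; the race is
   only in the unsynchronized increment, so one atomic update removes it
   while keeping dynamic storage duration. The omprace.h harness is omitted
   here. Compile with -fopenmp. */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int *counter;

void foo(void)
{
    #pragma omp atomic
    (*counter)++;               /* the only change: increment atomically */
}

int main(void)
{
    counter = (int *)malloc(sizeof(int));
    if (counter == NULL) {
        fprintf(stderr, "malloc() failed\n");
        exit(1);
    }
    *counter = 0;
    #pragma omp parallel
    {
        foo();
    }
    printf("%d\n", *counter);   /* now always equals the thread count */
    free(counter);
    return 0;
}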
lib.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <float.h> #include <omp.h> double calculate_euclidean_distance(double *atom_i, double *atom_j) { return sqrt(pow(atom_i[0] - atom_j[0], 2) + pow(atom_i[1] - atom_j[1], 2) + pow(atom_i[2] - atom_j[2], 2)); } double calculate_dynamic_energy(double euclidean_distance, double sigma) { double number; if (euclidean_distance != 0) { number = sigma / euclidean_distance; } else { number = DBL_MAX; } return pow(number, 12) - pow(number, 6); } double lennard_jones_function(double *atoms_position, int n, double epsilon, double sigma) { double total_energy = 0.0; int i, j; for (i = 0; i < n - 1; ++i) { for (j = i + 1; j < n; ++j) { double euclidean_distance = calculate_euclidean_distance(atoms_position + i*3, atoms_position + j*3); double dynamic_energy = calculate_dynamic_energy(euclidean_distance, sigma); total_energy += dynamic_energy; } } total_energy *= 4 * epsilon; return total_energy; } void evaluate(double *population, double *values, int population_size, int number_of_atoms){ int i; #pragma omp parallel for for(i = 0; i < population_size; ++i) { values[i] = lennard_jones_function(&population[i * number_of_atoms * 3], number_of_atoms, 1, 1); } }
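/* Usage sketch for the evaluator above (mine; assumes it is compiled and
   linked together with lib.c, e.g. "cc -fopenmp demo.c lib.c -lm"; the
   prototype is restated here instead of a header). Builds a random
   population of atom coordinates and scores each candidate with the
   parallel evaluate(). */
#include <stdio.h>
#include <stdlib.h>

void evaluate(double *population, double *values, int population_size,
              int number_of_atoms);

int main(void)
{
    const int population_size = 4;
    const int number_of_atoms = 5;
    double *population =
        malloc(sizeof(double) * population_size * number_of_atoms * 3);
    double *values = malloc(sizeof(double) * population_size);

    srand(42);
    for (int i = 0; i < population_size * number_of_atoms * 3; ++i)
        population[i] = (double)rand() / RAND_MAX;   /* coords in [0,1] */

    evaluate(population, values, population_size, number_of_atoms);

    for (int i = 0; i < population_size; ++i)
        printf("candidate %d: Lennard-Jones energy = %f\n", i, values[i]);

    free(population);
    free(values);
    return 0;
}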
matrix_op-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file matrix_op-inl.h * \brief Function definition of matrix related operators */ #ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #include <mxnet/operator_util.h> #include <vector> #include <algorithm> #include <utility> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "../channel_op_common.h" #include "../mxnet_op.h" #include "broadcast_reduce_op.h" #include "./init_op.h" #include "../../common/static_array.h" #include "./slice-inl.h" #if MXNET_USE_CUDA #include <thrust/device_vector.h> #endif #ifdef __CUDACC__ #include "./pseudo2DTranspose_op-inl.cuh" #endif namespace mxnet { namespace op { struct ReshapeParam : public dmlc::Parameter<ReshapeParam> { mxnet::TShape target_shape; bool keep_highest; mxnet::Tuple<int> shape; bool reverse; DMLC_DECLARE_PARAMETER(ReshapeParam) { DMLC_DECLARE_FIELD(shape) .set_default(mxnet::Tuple<int>()) .describe("The target shape"); DMLC_DECLARE_FIELD(reverse) .set_default(false) .describe("If true then the special values are inferred from right to left"); DMLC_DECLARE_FIELD(target_shape) .set_default(mxnet::TShape(0, -1)) .describe("(Deprecated! Use ``shape`` instead.) " "Target new shape. One and only one dim can be 0, " "in which case it will be inferred from the rest of dims"); DMLC_DECLARE_FIELD(keep_highest).set_default(false) .describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged." 
"If set to true, then the first dim in target_shape is ignored," "and always fixed as input"); } bool operator==(const ReshapeParam &other) const { return this->target_shape == other.target_shape && this->keep_highest == other.keep_highest && this->shape == other.shape && this->reverse == other.reverse; } }; template<typename IType> inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape, const mxnet::TShape& dshape, bool reverse) { std::vector<IType> dshape_vec; std::vector<IType> param_shape_vec(shape.begin(), shape.end()); for (int i = 0; i < dshape.ndim(); ++i) { dshape_vec.push_back(dshape[i]); } std::vector<IType> tmp; size_t src_idx = 0; int inf_idx = -1; if (reverse) { std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(param_shape_vec.begin(), param_shape_vec.end()); } auto dshape_len = dshape_vec.size(); auto params_len = param_shape_vec.size(); for (size_t i = 0; i < params_len; ++i) { IType proposed_dim = param_shape_vec[i]; if (proposed_dim == 0) { // keep same CHECK_LT(src_idx, dshape_len); tmp.push_back(dshape_vec[src_idx++]); } else if (proposed_dim == -1) { // infer CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred"; inf_idx = i; tmp.push_back(1); src_idx++; } else if (proposed_dim == -2) { // copy all remaining dims from source while (src_idx < dshape_len) { const int dn = dshape_vec[src_idx++]; tmp.push_back(dn); } } else if (proposed_dim == -3) { // merge two dims from source CHECK_LT(src_idx, dshape_len-1); const int d1 = dshape_vec[src_idx++]; const int d2 = dshape_vec[src_idx++]; if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) { tmp.push_back(-1); } else { tmp.push_back(d1 * d2); } } else if (proposed_dim == -4) { // split the source dim s into two dims // read the left dim and then the right dim (either can be -1) CHECK_LT(i + 2, params_len); CHECK_LT(src_idx, dshape_len); const int d0 = dshape_vec[src_idx++]; IType d1 = param_shape_vec[++i]; IType d2 = param_shape_vec[++i]; CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1."; if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) << "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0; tmp.push_back(d1); tmp.push_back(d2); } else { // greater than 0, new shape tmp.push_back(proposed_dim); src_idx++; } } if (inf_idx >= 0) { if (shape_is_known(dshape)) { IType new_size = 1; for (IType x : tmp) new_size *= x; tmp[inf_idx] = dshape.Size() / new_size; } else { tmp[inf_idx] = -1; } } if (reverse) { std::reverse(param_shape_vec.begin(), param_shape_vec.end()); std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(tmp.begin(), tmp.end()); } mxnet::TShape oshape(tmp.begin(), tmp.end()); return oshape; } inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) { if (shape_is_known(*in) && shape_is_known(out)) { return true; } else if (!shape_is_known(out)) { return false; } else { int zero_axis = -1; int known_dim_size_prod = 1; for (int i = 0; i < in->ndim(); i++) { if (!mxnet::dim_size_is_known(*in, i)) { if (zero_axis != -1) return false; // more than 1 zero found. 
else zero_axis = i; } else { known_dim_size_prod *= (*in)[i]; } } (*in)[zero_axis] = out.Size() / known_dim_size_prod; return true; } } inline bool ReshapeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape &dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; mxnet::TShape oshape; if (param_.shape.ndim() != 0) { oshape = InferReshapeShape(param_.shape, dshape, param_.reverse); } else if (param_.target_shape.ndim() != -1) { LOG(INFO) << "Using target_shape will be deprecated."; oshape = param_.target_shape; int neg_count = 0; index_t inf_idx = 0; index_t start_idx = param_.keep_highest ? 1 : 0; if (param_.keep_highest) { oshape[0] = dshape[0]; } for (int i = start_idx; i < oshape.ndim(); ++i) { if (oshape[i] == 0) { neg_count++; inf_idx = i; } } if (neg_count == 1) { oshape[inf_idx] = 1; oshape[inf_idx] = dshape.Size() / oshape.Size(); } } else { return shape_is_known((*out_attrs)[0]) && ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]); } ReverseReshapeInferShape(&dshape, oshape); #if 0 CHECK_EQ(oshape.Size(), dshape.Size()) << "Target shape size is different to source. " << "Target: " << oshape << "\nSource: " << dshape; #endif SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]); } inline bool FlattenShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape &dshape = (*in_attrs)[0]; if (!shape_is_known(dshape)) return false; int target_dim = 1; for (int i = 1; i < dshape.ndim(); ++i) { target_dim *= dshape[i]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim)); return true; } struct TransposeParam : public dmlc::Parameter<TransposeParam> { mxnet::TShape axes; DMLC_DECLARE_PARAMETER(TransposeParam) { DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1)) .describe("Target axis order. By default the axes will be inverted."); } bool operator==(const TransposeParam &other) const { return this->axes == other.axes; } }; /*! 
/*!
 * \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
 * \param in input tensor
 * \param out output tensor
 * \param row shape of dim 0 of input
 * \param col shape of dim 1 of input
 */
template<typename DType>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
  // ensure cache line hits and prevent cache miss for any configuration
  // L1 cache size to be utilized = 32kb = 2^15
  // Largest size of a single unit of any dtype <= 8 byte = 2^3
  // Number of elements - (2^15/2^3) = 2^12
  // Block-size - 2^6 x 2^6 (64 x 64)
  // But we could leverage unrolling of for loops (for parallelization)
  // Block-size - 2^5 x 2^5 (32 x 32) with potential 4 pragma for loop unrolled
  // blocksize * blocksize * num_threads = cache_size / dtype_size
  // Instead of explicit unroll, let compiler figure out optimal unroll factor
  index_t blocksize = 32;

  // collapse 2 parallelizes 2 for loops
  // inner 2 for loops aren't parallelized to prevent cache miss

  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER

  for (index_t i = 0; i < row; i += blocksize) {
    for (index_t j = 0; j < col; j += blocksize) {
      // transpose the block
      for (index_t a = j; (a < blocksize + j) && (a < col); ++a) {
        for (index_t b = i; (b < blocksize + i) && (b < row); ++b) {
          out[a * row + b] = in[b * col + a];
        }
      }
    }
  }
}

template<typename xpu>
void TransposeImpl(RunContext ctx,
                   const TBlob& src,
                   const TBlob& ret,
                   const mxnet::TShape& axes) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(src.type_flag_, ret.type_flag_);
  // zero-size tensor, no need to compute
  if (src.shape_.Size() == 0U) return;
  Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
  // This transpose can be used only if there exist n and m such that:
  // params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
  // Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
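  // [Illustrative aside] A minimal sketch of calling the cache-blocked CPU
  // Transpose2D defined above on a small row-major buffer; the buffer sizes
  // and contents are assumptions for illustration only.
#if 0
  std::vector<float> in(4 * 3), out(3 * 4);
  for (size_t n = 0; n < in.size(); ++n) in[n] = static_cast<float>(n);
  Transpose2D<float>(in.data(), out.data(), 4, 3);  // out[a * 4 + b] == in[b * 3 + a]
#endif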
if (isPseudo2DTranspose(axes)) { MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { transpose_pseudo2D<DType>(ret, src, axes, s); }); return; } #endif MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { switch (axes.ndim()) { case 0: { Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(1), s); Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(1), s); Copy(out, in, s); break; } case 1: { Tensor<xpu, 1, DType> in = src.get<xpu, 1, DType>(s); Tensor<xpu, 1, DType> out = ret.get<xpu, 1, DType>(s); Copy(out, in, s); break; } case 2: { mshadow::Tensor<xpu, 2, DType> in = src.FlatTo2D<xpu, DType>(s); mshadow::Tensor<xpu, 2, DType> out = ret.FlatTo2D<xpu, DType>(s); if (axes[0] == 1 && axes[1] == 0) { if (ctx.get_ctx().dev_mask() == cpu::kDevMask) { Transpose2D<DType>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]); } else { out = in.T(); } } else { Copy(out, in, s); } break; } case 3: { Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s); Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s); out = transpose(in, axes.get<3>()); break; } case 4: { Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s); Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s); out = transpose(in, axes.get<4>()); break; } case 5: { Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s); Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s); out = transpose(in, axes.get<5>()); break; } case 6: { Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s); Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s); out = transpose(in, axes.get<6>()); break; } default: LOG(FATAL) << "Transpose support at most 6 dimensions"; break; } }); } // matrix transpose template<typename xpu> void Transpose(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { if (req[0] == kNullOp) { return; } const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed); CHECK_EQ(req[0], kWriteTo) << "Transpose does not support kWriteInplace and kAddTo"; if (param.axes.ndim() == 0) { mxnet::TShape axes(inputs[0].ndim(), -1); for (int i = 0; i < axes.ndim(); ++i) { axes[i] = axes.ndim() - 1 - i; } TransposeImpl<xpu>(ctx.run_ctx, inputs[0], outputs[0], axes); } else { TransposeImpl<xpu>(ctx.run_ctx, inputs[0], outputs[0], param.axes); } } inline bool TransposeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape& shp = (*in_attrs)[0]; mxnet::TShape& out_shp = (*out_attrs)[0]; CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions"; CHECK_NE(shp.ndim(), 0) << "Number of dimensions cannot be 0"; CHECK_NE(out_shp.ndim(), 0) << "Number of dimensions cannot be 0"; if (shp.ndim() == -1 && out_shp.ndim() == -1) return false; // none of the shapes is known if (out_shp.ndim() > 0 && shp.ndim() > 0) CHECK_EQ(out_shp.ndim(), shp.ndim()); mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1); mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1); if (param.axes.ndim() == 0) { for (int i = 0; i < shp.ndim(); ++i) { ret[i] = shp[shp.ndim()-1-i]; } for (int i = 0; i < out_shp.ndim(); ++i) { get[shp.ndim()-1-i] = out_shp[i]; } } else { CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim()); for (int i = 0; i < shp.ndim(); ++i) { CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim())); ret[i] = 
shp[param.axes[i]]; } for (int i = 0; i < out_shp.ndim(); ++i) { get[param.axes[i]] = out_shp[i]; } } SHAPE_ASSIGN_CHECK(*in_attrs, 0, get); SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret); return shape_is_known(ret); } struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> { int axis; DMLC_DECLARE_PARAMETER(ExpandDimParam) { DMLC_DECLARE_FIELD(axis) .describe("Position where new axis is to be inserted. Suppose that " "the input `NDArray`'s dimension is `ndim`, the range of " "the inserted axis is `[-ndim, ndim]`"); } bool operator==(const ExpandDimParam &other) const { return this->axis == other.axis; } }; inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); if (!mxnet::ndim_is_known(in_attrs->at(0)) && !mxnet::ndim_is_known(out_attrs->at(0))) { return false; } mxnet::TShape& ishape = (*in_attrs)[0]; mxnet::TShape& oshape = (*out_attrs)[0]; int indim = ishape.ndim(); bool unknown_ishape = false; if (-1 == indim) { indim = oshape.ndim() - 1; unknown_ishape = true; } int axis = param.axis; if (axis < 0) { axis += indim + 1; } CHECK(axis >= 0 && axis <= indim) << "axis must be in the range [" << -indim << ", " << indim << "] (" << param.axis << " provided)"; mxnet::TShape ret(indim + 1, -1); for (int i = 0; i < axis; ++i) { ret[i] = (unknown_ishape? -1 : ishape[i]); } ret[axis] = 1; for (int i = axis+1; i < indim+1; ++i) { ret[i] = (unknown_ishape? -1 : ishape[i-1]); } SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret); ret = mxnet::TShape(indim, -1); for (int i = 0; i < axis; ++i) ret[i] = oshape[i]; for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i]; SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret); return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0)); } // Currently MKLDNN only supports step = 1 or step has no value inline bool SupportMKLDNNSlice(const SliceParam& param) { if (param.step.ndim() == 0U) return true; for (int i = 0; i < param.step.ndim(); ++i) { if (param.step[i].has_value() && param.step[i].value() != 1) return false; } return true; } inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1); CHECK_EQ(out_attrs->size(), 1); const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); const auto& in_stype = in_attrs->at(0); auto& out_stype = out_attrs->at(0); bool dispatched = false; const auto dispatch_ex = DispatchMode::kFComputeEx; // If step = 1, no need to fallback; otherwise fallback to dense bool trivial_step = false; if (param.step.ndim() == 0U) { trivial_step = true; } else if (param.step.ndim() == 1U && (!param.step[0].has_value() || param.step[0].value() == 1)) { trivial_step = true; } if (in_stype == kDefaultStorage) { #if MXNET_USE_MKLDNN == 1 if (dev_mask == Context::kCPU && MKLDNNEnvSet() && SupportMKLDNNSlice(param)) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, dispatch_ex); } #endif if (!dispatched) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } } if (!dispatched && in_stype == kCSRStorage && trivial_step) { dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } // slice the 
indptr of a csr struct SliceCsrIndPtr { template<typename IType> MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) { KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base); } }; /* * a wrapper to launch SliceCsrIndPtr kernel. * slice [src[begin] .. src[end]) and store in dst[0, end - begin) */ template<typename xpu, typename IType> void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx, const IType* src, IType* dst) { using namespace mshadow; using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); int indptr_len = end - begin + 1; Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin); } /* * Slice a CSR NDArray for first dimension */ template<typename xpu> void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx, const NDArray &in, const NDArray &out) { using namespace mshadow; using namespace mxnet_op; using namespace csr; nnvm::dim_t begin_row = begin[0]; nnvm::dim_t end_row = end[0]; nnvm::dim_t indptr_len = end_row - begin_row + 1; out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len)); // assume idx indptr share the same type MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, { MSHADOW_TYPE_SWITCH(in.dtype(), DType, { RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>(); RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>(); SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr); Stream<xpu> *s = ctx.get_stream<xpu>(); RType nnz = 0; mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)), Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s)); // return csr zeros if nnz = 0 if (nnz == 0) { out.set_aux_shape(kIdx, Shape1(0)); return; } // copy indices and values out.CheckAndAllocAuxData(kIdx, Shape1(nnz)); out.CheckAndAllocData(Shape1(nnz)); IType* in_idx = in.aux_data(kIdx).dptr<IType>(); IType* out_idx = out.aux_data(kIdx).dptr<IType>(); DType* in_data = in.data().dptr<DType>(); DType* out_data = out.data().dptr<DType>(); RType offset = 0; mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)), Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s)); mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s), Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s); mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s), Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s); }); }); }); } /*! * \brief slice a CSRNDArray for two dimensions */ struct SliceDimTwoCsrAssign { /*! 
 * \brief This function slices a CSRNDArray on axis one between begin_col and end_col
 * \param i loop index
 * \param out_idx output csr ndarray column indices
 * \param out_data output csr ndarray data
 * \param out_indptr output csr ndarray row index pointer
 * \param in_idx input csr ndarray column indices
 * \param in_data input csr ndarray data
 * \param in_indptr input csr ndarray row index pointer
 * \param begin_col begin column index
 * \param end_col end column index
 */
template<typename IType, typename RType, typename DType>
MSHADOW_XINLINE static void Map(int i,
                                IType* out_idx, DType* out_data,
                                const RType* out_indptr,
                                const IType* in_idx, const DType* in_data,
                                const RType* in_indptr,
                                const int begin_col, const int end_col) {
  RType ind = out_indptr[i];
  for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) {
    // indices of CSRNDArray are in ascending order per row
    if (in_idx[j] >= end_col) {
      break;
    } else if (in_idx[j] >= begin_col) {
      out_idx[ind] = in_idx[j] - begin_col;
      out_data[ind] = in_data[j];
      ind++;
    }
  }
}
};

/*
 * Slice a CSR NDArray for two dimensions
 */
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end,
                        const OpContext& ctx, const NDArray &in, const NDArray &out);

template<typename xpu>
void SliceCsrImpl(const SliceParam &param, const OpContext& ctx,
                  const NDArray &in, OpReqType req, const NDArray &out) {
  if (req == kNullOp) return;
  CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
  CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
  const mxnet::TShape ishape = in.shape();
  const mxnet::TShape oshape = out.shape();
  int N = ishape.ndim();
  mxnet::TShape begin(N, -1), end(N, -1);
  for (int i = 0; i < N; ++i) {
    int s = 0;
    if (i < param.begin.ndim() && param.begin[i]) {
      s = *param.begin[i];
      if (s < 0) s += ishape[i];
    }
    begin[i] = s;
    end[i] = s + oshape[i];
  }
  switch (N) {
    case 1: {
      SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
      break;
    }
    case 2: {
      SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
      break;
    }
    default:
      LOG(FATAL) << "CSR is only for 2-D shape";
      break;
  }
}

template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  auto in_stype = inputs[0].storage_type();
  if (in_stype == kCSRStorage) {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  } else {
    LOG(FATAL) << "Slice not implemented for storage type " << in_stype;
  }
}

template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                          common::StaticArray<index_t, ndim>* begin,
                          common::StaticArray<index_t, ndim>* end,
                          common::StaticArray<index_t, ndim>* step) {
  // Returns true if the sliced output is zero-sized, false otherwise.
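  // [Illustrative aside] An assumed worked example of the normalization done
  // below: for dshape = (10) with begin = (-3) and no end/step given, the loop
  // yields b = 7, e = 10, s = 1, so the output dim is (e - b - 1) / s + 1 = 3,
  // and the function returns false because the sliced output is non-empty.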
bool zero_size_shape = false; CHECK_NE(dshape.ndim(), 0U); CHECK_LE(param_begin.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_LE(param_end.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_EQ(param_begin.ndim(), param_end.ndim()) << "begin and end must have the same length"; CHECK_EQ(ndim, dshape.ndim()) << "Static array size=" << ndim << " is not equal to data shape ndim=" << dshape.ndim(); if (param_step.ndim() > 0) { CHECK_EQ(param_step.ndim(), param_begin.ndim()) << "step and begin must have the same length"; } for (int i = 0; i < param_begin.ndim(); ++i) { index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1; CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0"; index_t b = 0, e = 0; const index_t len = dshape[i]; if (len > 0) { b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0); e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len); if (b < 0) { b += len; } if (e < 0 && param_end[i].has_value()) { e += len; } // move the begin and end to correct position for calculating dim size b = (b < 0 && s > 0) ? 0 : b; b = (b > len - 1 && s < 0) ? len - 1 : b; // if the start value lead to empty tensor under step s, use -1 for indication b = (b < 0 || b > len - 1) ? -1 : b; e = e > -1 ? e : -1; e = e > len ? len : e; } else if (len == 0) { b = 0; e = 0; } (*begin)[i] = b; (*end)[i] = e; (*step)[i] = s; // checking begin==end if (b == e) { zero_size_shape = true; } } for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) { (*begin)[i] = 0; (*end)[i] = dshape[i]; (*step)[i] = 1; } return zero_size_shape; } inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape, const index_t i, const index_t b, const index_t e, const index_t s, mxnet::TShape* oshape) { if (!mxnet::dim_size_is_known(dshape, i)) { (*oshape)[i] = -1; return; } if (e != b && b >= 0) { if (s > 0) { (*oshape)[i] = e > b ? (e - b - 1) / s + 1 : 0; } else { (*oshape)[i] = e < b ? 
(b - e - 1) / (-s) + 1 : 0;
    }
  } else {
    (*oshape)[i] = 0;
  }
}

inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  mxnet::TShape oshape = dshape;

  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
    }
  })

  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(dshape) && shape_is_known(oshape);
}

template<int ndim, int req, typename xpu>
struct slice_forward;

template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattened 2D data
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
    #pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
      idx /= oshape[k];
      stride *= dshape[k];
    }
    KERNEL_ASSIGN(out[i], req,
                  data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
  }
};

template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t out_offset = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D data
      index_t stride = 1;
      index_t idx = i;
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
        idx /= oshape[k];
        stride *= dshape[k];
      }
      KERNEL_ASSIGN(out[out_offset++], req,
                    data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
    }
  }
};

template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (out.Size() == 0) return;
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
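  // [Illustrative aside] How the slice_forward kernels above recover an input
  // offset from a flat output index, on an assumed example: data shape (4, 5),
  // begin = (1, 1), step = (2, 2), so the output shape is (2, 2). For the CPU
  // kernel with row i = 1 and column j = 1: irow = (1 % 2) * 2 + 1 = 3, and the
  // element read is data[irow * 5 + j * 2 + 1] = data[18], i.e. in[3][3] == out[1][1].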
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        size_t num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          out.dptr<DType>(), data.dptr<DType>(),
          data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}

template<int ndim, int req, typename xpu>
struct slice_assign;

template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t offset = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D out
      index_t stride = 1;
      index_t idx = i;
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                    req, val[offset++]);
    }
  }
};

template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattened 2D out
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
    #pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
      idx /= vshape[k];
      stride *= oshape[k];
    }
    KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                  req, val[i]);
  }
};

template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0) return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
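    // [Illustrative aside] The backward of slice is a scatter: on an assumed
    // 1-D example x of shape (5) sliced as x[1:3], the two incoming output
    // gradients are written (via slice_assign) into igrad positions 1 and 2,
    // while the remaining positions keep the zeros written by the Fill above.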
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = ograd.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= ograd.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, igrad.dptr<DType>(), ograd.dptr<DType>(), igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step); }) }) }) } inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 2U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; mxnet::TShape vshape = dshape; // vshape is the value shape on the right hand side const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(dshape.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step); for (int i = 0; i < param.begin.ndim(); ++i) { const int b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape); } }) SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape); SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<typename xpu> void SliceAssignOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 2U); // data[index] = val, data and val are two inputs CHECK_EQ(outputs.size(), 1U); if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& val = inputs[1]; const TBlob& out = outputs[0]; if (req[0] == kWriteTo) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s); Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s); Copy(out, in, s); }); } else if (req[0] != kWriteInplace) { LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace"; } const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); if (zero_size_shape) { return; // slice_assign of zero-sized subspace needs no operation. 
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = val.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= val.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          out.dptr<DType>(), val.dptr<DType>(),
          out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
      })
    })
  })
}

struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
  mxnet::Tuple<dmlc::optional<index_t>> step;
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(0)
    .describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin)
    .describe("starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end)
    .describe("ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
    .describe("step for the slice operation, supports negative values.");
  }
};

inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector *in_attrs,
                                     mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape)) return false;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}

template<int ndim>
struct slice_assign_scalar {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D out
      index_t stride = 1;
      index_t idx = i;
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                    req, val);
    }
  }
};

template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  mxnet::TShape vshape = data.shape_;
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end,
                                         param.step, &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      const int b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
        out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
        out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}

struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;
  index_t begin;
  dmlc::optional<index_t> end;
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
    .describe("The beginning index along the axis to be sliced, "
              "supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
    .describe("The ending index along the axis to be sliced, "
              "supports negative indexes.");
  }
};

inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();
  }
  CHECK(*axis < ishape.ndim() && *axis >= 0)
    << "Transformed axis must be smaller than the source ndim and non-negative! Received axis="
    << param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end
                             << " as axis_size is " << axis_size;
    CHECK((*begin < *end)) << "Invalid begin, end, get begin=" << param.begin
                           << ", end=" << param.end;
  } else {
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0) << "Invalid begin, end, get begin=" << param.begin
                   << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}

inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  mxnet::TShape shape(ishape.ndim(), -1);
  for (int i = 0; i < ishape.ndim(); ++i) {
    if (i == axis) {
      shape[i] = static_cast<index_t>(end - begin);
    } else {
      shape[i] = ishape[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  return shape_is_known(shape);
}

template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> in = inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> out = outputs[0].FlatTo2D<xpu, DType>(s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> in = inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> out = outputs[0].FlatTo3D<xpu, DType>(axis, s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  }
}

// Backward pass of slice_axis: scatter the output gradient back along the sliced axis
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> ograd = inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> igrad = outputs[0].FlatTo2D<xpu, DType>(s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> ograd = inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> igrad = outputs[0].FlatTo3D<xpu, DType>(axis, s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  }
}

struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
    .describe("List of axes on which input data will be sliced according to the "
              "corresponding size of the second input. By default will slice on "
              "all axes. Negative axes are supported.");
  }
};

inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (param.axes.ndim() == 0) {
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_like performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << " exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << " exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}

inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}

template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);

  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          out.dptr<DType>(), data.dptr<DType>(),
          data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }

  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);

  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          igrad.dptr<DType>(), ograd.dptr<DType>(),
          igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}

struct ClipParam : public dmlc::Parameter<ClipParam> {
  real_t a_min, a_max;
  DMLC_DECLARE_PARAMETER(ClipParam) {
    DMLC_DECLARE_FIELD(a_min)
    .describe("Minimum value");
    DMLC_DECLARE_FIELD(a_max)
    .describe("Maximum value");
  }
};

struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    DType data = datas[i];
    if (data > a_max) {
      out[i] = a_max;
    } else if (data < a_min) {
      out[i] = a_min;
    } else {
      out[i] = data;
    }
  }
};

struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad,
                                  const DType* datas,
                                  const float a_min, const float a_max) {
    DType data = datas[i];
    if (data > a_max) {
      out[i] = 0;
    } else if (data < a_min) {
      out[i] = 0;
    } else {
      out[i] = grad[i];
    }
  }
};

template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(),
      outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), param.a_min, param.a_max);
  });
}

template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
            const OpContext& ctx,
            const std::vector<NDArray>& inputs,
            const std::vector<OpReqType>& req,
            const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
  CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
  CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
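  // [Illustrative aside] Element-wise behaviour of the clip kernel above on an
  // assumed input: clip([-2.0, 0.5, 3.0], a_min = 0, a_max = 1) -> [0, 0.5, 1].
  // For non-default (sparse) storage, this wrapper maps the same dense kernel
  // over the stored values via MapToFCompute below.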
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>); } template<typename xpu> void ClipGrad_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), param.a_min, param.a_max); }); } /*! * \brief The parameters of the repeat operator include * the number of repeating time and axis (optional). * The parameters will be later used to deduce the * output ndarray shape in bool RepeatShape() function. */ struct RepeatParam : public dmlc::Parameter<RepeatParam> { int repeats = 1; dmlc::optional<int> axis; DMLC_DECLARE_PARAMETER(RepeatParam) { DMLC_DECLARE_FIELD(repeats) .describe("The number of repetitions for each element."); DMLC_DECLARE_FIELD(axis) .set_default(dmlc::optional<int>()) .describe("The axis along which to repeat values." " The negative numbers are interpreted counting from the backward." " By default, use the flattened input array," " and return a flat output array."); } }; /*! * \brief Helper function for getting user input params for the operator repeat. * Sanity check the user input values. */ inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape, int* repeats, dmlc::optional<int>* axisOpt) { *repeats = param.repeats; CHECK_GE(*repeats, 0) << "repeats cannot be a negative number"; *axisOpt = param.axis; if (static_cast<bool>(*axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt->value(); if (axis < 0) { axis += ndims; } CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds"; } } inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& ishape = (*in_attrs)[0]; int repeats = 0; dmlc::optional<int> axisOpt; GetRepeatParams(param, ishape, &repeats, &axisOpt); // If 0 repeats, return an empty 1-dim, 0-size array if (0 == repeats) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0)); return true; } // If repeats > 0, multiply the size of the corresponding axis by repeats if (static_cast<bool>(axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt.value(); if (axis < 0) { axis += ndims; } mxnet::TShape shape(ishape.ndim(), -1); for (int i = 0; i < ishape.ndim(); ++i) { if (i == axis) { shape[i] = repeats * ishape[i]; } else { shape[i] = ishape[i]; } } SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } else { // If axis is not input by user, return a flat 1D array of size = in.size*repeats mxnet::TShape shape(1, ishape.Size() * repeats); SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } return shape_is_known(out_attrs->at(0)); } inline bool RepeatOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if ((*in_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]); } else if ((*out_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]); } return true; } /*! 
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
 * of operator repeat.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    int ndim = ishape.ndim();
    if (axis < 0) {
      axis += ndim;
    }
    CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";

    // reshape the input tensor by adding a dim at the (axis+1)-th dim
    mxnet::TShape rshape(ishape.ndim()+1, 1);
    // the shape we want to broadcast to
    mxnet::TShape bshape(rshape.ndim(), 1);
    int i = 0;
    while (i <= axis) {
      rshape[i] = bshape[i] = ishape[i];
      ++i;
    }
    rshape[i] = 1;
    bshape[i] = repeats;
    while (i < ishape.ndim()) {
      rshape[i+1] = ishape[i];
      bshape[i+1] = ishape[i];
      ++i;
    }
    return std::make_pair(rshape, bshape);
  } else {
    // axis is not input by user
    // reshape the tensor into shape (ishape.Size(), 1)
    // then add one dim at axis = 1 and broadcast to
    // shape (ishape.Size(), repeats)
    mxnet::TShape rshape(2, 1);
    rshape[0] = ishape.Size();
    rshape[1] = 1;
    mxnet::TShape bshape(2, 1);
    bshape[0] = rshape[0];
    bshape[1] = repeats;
    return std::make_pair(rshape, bshape);
  }
}

template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const TBlob& iTBlob = inputs[0];
  const mxnet::TShape& ishape = iTBlob.shape_;
  if (!shape_is_known(ishape)) return;

  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (0 == repeats) return;

  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
      ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);

  // reshaped input tblob
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};

  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};

  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}

/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule.
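 * Example (illustration only, not part of the original doc): for input [a, b]
 * with repeats = 2 the forward output is [a, a, b, b], so this backward
 * reduce-sums each group of two output gradients into a single input gradient
 * via ReduceAxesComputeImpl over the reshaped shapes.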
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);

  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape)) return;

  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (0 == repeats) return;

  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
      ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);

  // reshaped output grad tblob
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};

  // reshaped input grad tblob
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};

  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}

struct TileParam : public dmlc::Parameter<TileParam> {
  mxnet::Tuple<int> reps;
  DMLC_DECLARE_PARAMETER(TileParam) {
    DMLC_DECLARE_FIELD(reps)
    .describe("The number of times for repeating the tensor a. Each dim size of reps"
              " must be a positive integer."
              " If reps has length d, the result will have dimension of max(d, a.ndim);"
              " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
              " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
  }
};

inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector *in_attrs,
                        mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  const mxnet::Tuple<int>& reps = param.reps;
  // If reps is empty, return an identical input array
  if (reps.ndim() == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return true;
  }
  mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  for (int i = oshape.ndim() - 1; i >= 0; --i) {
    if (i1 >= 0 && i2 >= 0) {
      oshape[i] = ishape[i1--] * reps[i2--];
    } else if (i1 >= 0) {
      oshape[i] = ishape[i1--];
    } else if (i2 >= 0) {
      oshape[i] = reps[i2--];
    }
  }
  // If reps contains 0s, oshape is a zero-size shape.
  // Need to distinguish between np_shape mode and legacy mode.
  if (!Imperative::Get()->is_np_shape()) {
    common::ConvertToNumpyShape(&oshape);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}

inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  if ((*in_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
  } else if ((*out_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  }
  return true;
}

/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
 * of operator tile.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
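 * Example (illustration only, not part of the original doc): ishape = (2, 3)
 * with reps = (2, 2) yields rshape = (1, 2, 1, 3) and bshape = (2, 2, 2, 3);
 * broadcasting rshape to bshape and collapsing adjacent dim pairs gives the
 * tiled result shape (4, 6).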
*/ inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp( const mxnet::TShape& ishape, const mxnet::Tuple<int>& reps) { if (reps.ndim() == 0) { return std::make_pair(ishape, ishape); } // The shape we want to broadcast to mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1); // The shape of the input tensor after adding new axes before each dim mxnet::TShape rshape(bshape.ndim(), 1); int i1 = ishape.ndim() - 1; int i2 = reps.ndim() - 1; for (int i = bshape.ndim() - 1; i >= 0; --i) { if (0 == (i & 1)) { bshape[i] = (i2 >= 0? reps[i2--] : 1); rshape[i] = 1; } else { rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1); } } return std::make_pair(rshape, bshape); } /*! * \brief Implementation of tiling the input tensor a based * on the user-input shape, reps. * If a.ndim < reps.ndim, new axes are pre-pended to a. For example, * the input tensor has shape (3,), and the reps is (2, 4); the input * tensor would be reshaped to (1, 3). * If a.ndim > reps.ndim, pre-pending 1's to reps. For example, * the input tensor has shape (2, 3, 4, 5), and reps is (2, 2); * the reps would be changed to (1, 1, 2, 2). * Suppose we have a.ndim = reps.ndim now. To achieve tiling, * we utilize the operator broadcast_to. For example, for a tensor * of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape * the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding * one axis before each dimension. Then, we want to broadcast * the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final * output tensor would have shape (2*2, 8*3, 9*4, 3*5). */ template<typename xpu> void TileOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); if (inputs[0].Size() == 0) return; const mxnet::TShape& ishape = inputs[0].shape_; const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps; // If any one of the number in reps is zero, return immediately for (int i = 0; i < reps.ndim(); ++i) { if (0 == reps[i]) return; } std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps); // reshaped input tblob TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; // reshaped output tblob TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs); } /*! * \brief Compute the gradient of the loss function * with respect to the input of the operator. * Backpropagation is employed to implement the * chain rule. 
* \param inputs the gradient of the loss function * with respect to the outputs of the operator * \param outputs the gradient of the loss function * with respect to the inputs of the operator */ template<typename xpu> void TileOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); if (inputs[0].Size() == 0) return; const mxnet::TShape& oshape = outputs[0].shape_; const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps; // If any one of the number in reps is zero, return immediately for (int i = 0; i < reps.ndim(); ++i) { if (0 == reps[i]) return; } std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps); // reshaped output grad tblob TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; // reshaped input grad tblob TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>( ctx, newInputs, req, newOutputs, rshapes.first); } struct ReverseParam : public dmlc::Parameter<ReverseParam> { mxnet::Tuple<int> axis; DMLC_DECLARE_PARAMETER(ReverseParam) { DMLC_DECLARE_FIELD(axis) .describe("The axis which to reverse elements."); } }; #define REVERSE_MAX_DIM 10U struct reverse { MSHADOW_XINLINE static index_t ReverseIndex(index_t idx, index_t nreversedim, const index_t * stride_, const index_t * trailing_) { index_t outputIndex = idx; for (index_t i = 0; i < nreversedim; ++i) { const index_t low = outputIndex % trailing_[i]; index_t high = outputIndex / trailing_[i]; const index_t x = high%stride_[i]; high /= stride_[i]; outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low; } return outputIndex; } #ifdef __CUDACC__ template<typename DType> __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst, const index_t * stride_, const index_t * trailing_) { __shared__ index_t stride_share[REVERSE_MAX_DIM]; __shared__ index_t trailing_share[REVERSE_MAX_DIM]; if (threadIdx.x < REVERSE_MAX_DIM) { stride_share[threadIdx.x] = stride_[threadIdx.x]; trailing_share[threadIdx.x] = trailing_[threadIdx.x]; } __syncthreads(); index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share); dst[new_idx] = src[index]; } #else template<typename DType> MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst, const index_t * stride_, const index_t * trailing_) { index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_); dst[new_idx] = src[index]; } #endif }; template<typename xpu> void ReverseOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM); Stream<xpu> *s = ctx.get_stream<xpu>(); const mxnet::TShape& ishape = inputs[0].shape_; std::vector<index_t> stride_(param.axis.ndim()); std::vector<index_t> trailing_(param.axis.ndim()); index_t reverse_index = 0; for (int axis : param.axis) { CHECK_LT(axis, 
ishape.ndim()); stride_[reverse_index] = ishape[axis]; trailing_[reverse_index] = 1; for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) { trailing_[reverse_index] *= ishape[i2]; } reverse_index++; } #ifdef __CUDACC__ mshadow::Tensor<xpu, 1, uint8_t> workspace = ctx.requested[0].get_space_typed<xpu, 1, uint8_t>( mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s); auto stride_workspace = workspace.dptr_; auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t); cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()), stride_.size() * sizeof(index_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()), trailing_.size() * sizeof(index_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); #endif #ifdef __CUDACC__ MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index, inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace)); }); #else MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index, inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), stride_.data(), trailing_.data()); }); #endif } struct StackParam : public dmlc::Parameter<StackParam> { int axis; int num_args; DMLC_DECLARE_PARAMETER(StackParam) { DMLC_DECLARE_FIELD(axis) .set_default(0) .describe("The axis in the result array along which the input arrays are stacked."); DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) .describe("Number of inputs to be stacked."); } }; inline bool StackOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const StackParam& param = dmlc::get<StackParam>(attrs.parsed); mxnet::TShape dshape; for (const mxnet::TShape& i : (*in_attrs)) { shape_assign(&dshape, i); } if (!shape_is_known(dshape)) return false; mxnet::TShape oshape(dshape.ndim() + 1, -1); int axis = CheckAxis(param.axis, oshape.ndim()); for (int i = 0; i < axis; ++i) { oshape[i] = dshape[i]; } oshape[axis] = param.num_args; for (index_t i = axis + 1; i < oshape.ndim(); ++i) { oshape[i] = dshape[i-1]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(oshape); } template<typename xpu> void StackOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; const StackParam& param = dmlc::get<StackParam>(attrs.parsed); int axis = CheckAxis(param.axis, outputs[0].ndim()); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { std::vector<Tensor<xpu, 3, DType> > data(inputs.size()); Tensor<xpu, 3, DType> out; size_t leading = 1, trailing = 1; for (int i = 0; i < axis; ++i) { leading *= outputs[0].shape_[i]; } for (int i = axis + 1; i < outputs[0].ndim(); ++i) { trailing *= outputs[0].shape_[i]; } size_t mid = outputs[0].shape_[axis]; Shape<3> oshape = Shape3(leading, mid, trailing); out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s); for (size_t i = 0; i < inputs.size(); ++i) { Shape<3> dshape = Shape3(leading, 1, trailing); data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s); } Concatenate(data, &out, 1, req[0]); }) } template<typename xpu> void StackOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const 
std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; const StackParam& param = dmlc::get<StackParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size()); Tensor<xpu, 3, DType> grad; size_t leading = 1, trailing = 1; for (int i = 0; i < axis; ++i) { leading *= inputs[0].shape_[i]; } for (int i = axis + 1; i < inputs[0].ndim(); ++i) { trailing *= inputs[0].shape_[i]; } size_t mid = inputs[0].shape_[axis]; Shape<3> oshape = Shape3(leading, mid, trailing); grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s); for (size_t i = 0; i < outputs.size(); ++i) { Shape<3> dshape = Shape3(leading, 1, trailing); grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s); } Split(grad, &grad_in, 1, req); }) } struct SqueezeParam : public dmlc::Parameter<SqueezeParam> { dmlc::optional<mxnet::Tuple<int>> axis; DMLC_DECLARE_PARAMETER(SqueezeParam) { DMLC_DECLARE_FIELD(axis) .set_default(dmlc::optional<mxnet::Tuple<int>>()) .describe("Selects a subset of the single-dimensional entries in the shape." " If an axis is selected with shape entry greater than one, an error is raised."); } }; // Given a shape that may have dim size equal to -1 (marking a squeezed axis), // move all the -1 entries to the end of the shape array // and keep the relative order of the other entries. // Returns the new shape size after moving all -1 entries to the end. inline size_t SqueezeShapeHelper(mxnet::TShape* shape) { CHECK(shape != nullptr); size_t count = 0; for (int i = 0; i < shape->ndim(); ++i) { if ((*shape)[i] == -1) { ++count; } else { std::swap((*shape)[i], (*shape)[i-count]); } } return shape->ndim() - count; } inline bool SqueezeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = in_attrs->at(0); const int dndim = dshape.ndim(); if (!shape_is_known(dshape)) return false; mxnet::TShape oshape = dshape; if (param.axis.has_value()) { // preprocess axis mxnet::Tuple<int> axes = param.axis.value(); for (int i = 0; i < axes.ndim(); ++i) { if (axes[i] < 0) { axes[i] += dndim; CHECK_GE(axes[i], 0) << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim; } CHECK_LT(axes[i], dndim) << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim; CHECK_EQ(dshape[axes[i]], 1) << "cannot select an axis to squeeze out which has size=" << dshape[axes[i]] << " not equal to one"; CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis"; oshape[axes[i]] = -1; } } else { for (int i = 0; i < oshape.ndim(); ++i) { if (oshape[i] == 1) oshape[i] = -1; } } size_t oshape_size = SqueezeShapeHelper(&oshape); if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1) oshape[0] = 1; oshape_size = 1; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size)); return true; } struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> { int block_size; DMLC_DECLARE_PARAMETER(DepthToSpaceParam) { DMLC_DECLARE_FIELD(block_size) .describe("Blocks of [block_size,
block_size] are moved"); } }; inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor"; mxnet::TShape expected_out(4, -1); mxnet::TShape& in_shape = in_attrs->at(0); int block = param.block_size; CHECK_NE(block, 0) << "block_size must be a positive integer value"; CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0"; CHECK_EQ(in_shape[1] % (block * block), 0) << "Cannot perform Depth To Space operation on the specified tensor." " Dimension:1(depth dimension) should be a multiple of 'block^2'"; CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0"; CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0"; CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0"; expected_out[0] = in_shape[0]; expected_out[1] = in_shape[1] / (block * block); int i = 2; while (i < expected_out.ndim()) { expected_out[i] = in_shape[i] * block; ++i; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out); return shape_is_known(expected_out); } inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } /*! * \brief This function updates the value of input index from where the data element * needs to be fetched and written out to the ith location in output tensor * \param index_position index within offset array to get offset of given dimension * \param dim_size size of current dimension * \param idx output tensor index * \param inp_index index within input tensor from where value is retrieved * \param offset_arr array containing the linear offset of input tensor */ MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx, index_t *inp_index, const index_t* offset_arr) { index_t next_idx_val = *idx / dim_size; *inp_index += (*idx - next_idx_val * dim_size) * offset_arr[index_position]; *idx = next_idx_val; } /*! 
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) -> * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped * to the ith index of output tensor * \param i tensor index * \param out_data output tensor * \param in_data input tensor * \param block size of chunks to be moved out of depth dimension * \param size array containing the size of each dimension of input tensor * \param offset_arr array containing the linear offset of input tensor */ template<int req> struct depth_to_space_forward { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block, const index_t* size, const index_t* offset_arr) { index_t inp_index = 0, idx = i, dim_size; dim_size = block; update_index(2, dim_size, &idx, &inp_index, offset_arr); dim_size = size[3]; update_index(5, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(1, dim_size, &idx, &inp_index, offset_arr); dim_size = size[2]; update_index(4, dim_size, &idx, &inp_index, offset_arr); dim_size = size[1] / (block * block); update_index(3, dim_size, &idx, &inp_index, offset_arr); dim_size = size[0]; update_index(0, dim_size, &idx, &inp_index, offset_arr); KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]); } }; /*! * \brief This function calculates the linear offset for each dimension of * input tensor and stores them in an array, which is later used in * performing depth_to_space operation * \param i global thread id * \param offset_arr array to be populated with offset values * \param size array to be populated with size of each dimension of input tensor * \param block size of chunks to be moved out of depth dimension * \param size0 size of Dim 0 of input tensor * \param size1 size of Dim 1 of input tensor * \param size2 size of Dim 2 of input tensor * \param size3 size of Dim 3 of input tensor */ template<int req> struct compute_offset_for_depth_to_space { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block, const index_t size0, const index_t size1, const index_t size2, const index_t size3) { size[0] = size0; size[1] = size1; size[2] = size2; size[3] = size3; offset_arr[5] = 1; offset_arr[4] = offset_arr[5] * size[3]; offset_arr[3] = offset_arr[4] * size[2]; offset_arr[2] = offset_arr[3] * size[1] / (block * block); offset_arr[1] = offset_arr[2] * block; offset_arr[0] = offset_arr[1] * block; } }; template<typename xpu> void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); using namespace mxnet_op; int block = param.block_size; mshadow::Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s); char* workspace_curr_ptr = workspace.dptr_; index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr); index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6); MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch( s, 1, offset_arr, size, block, in_data.shape_[0], 
in_data.shape_[1], in_data.shape_[2], in_data.shape_[3]); Kernel<depth_to_space_forward<req_type>, xpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(), block, size, offset_arr); }); }); } inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor"; mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1); mxnet::TShape& in_shape = in_attrs->at(0); int block = param.block_size; CHECK_NE(block, 0) << "block_size must be a positive integer value"; CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0"; CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0"; CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0"; CHECK_EQ(in_shape[2] % block, 0) << "Cannot perform Space To Depth operation on the specified tensor." " Dimension:2(1st Space dimension) should be a multiple of 'block' "; CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0"; CHECK_EQ(in_shape[3] % block, 0) << "Cannot perform Space To Depth operation on the specified tensor." " Dimension:3(2nd space dimension) should be a multiple of 'block' "; expected_out[0] = in_shape[0]; expected_out[1] = in_shape[1] * block * block; int i = 2; while (i < expected_out.ndim()) { expected_out[i] = in_shape[i] / block; ++i; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out); return shape_is_known(expected_out); } inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } /*! * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) -> * (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped * to the ith index of output tensor * \param i tensor index * \param out_data output tensor * \param in_data input tensor * \param block size of chunks to be moved out of depth dimension * \param size array containing the size of each dimension of input tensor * \param offset_arr array containing the linear offset of input tensor */ template<int req> struct space_to_depth_forward { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block, const index_t* size, const index_t* offset_arr) { index_t inp_index = 0, idx = i, dim_size; dim_size = size[3] / block; update_index(4, dim_size, &idx, &inp_index, offset_arr); dim_size = size[2] / block; update_index(2, dim_size, &idx, &inp_index, offset_arr); dim_size = size[1]; update_index(1, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(5, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(3, dim_size, &idx, &inp_index, offset_arr); dim_size = size[0]; update_index(0, dim_size, &idx, &inp_index, offset_arr); KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]); } }; /*!
* \brief This function calculates the linear offset for each dimension of * input tensor and stores them in an array, which is later used in * performing space_to_depth operation * \param i global thread id * \param offset_arr array to be populated with offset values * \param size array to be populated with size of each dimension of input tensor * \param block size of chunks to be moved out of depth dimension * \param size0 size of Dim 0 of input tensor * \param size1 size of Dim 1 of input tensor * \param size2 size of Dim 2 of input tensor * \param size3 size of Dim 3 of input tensor */ template<int req> struct compute_offset_for_space_to_depth { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block, const index_t size0, const index_t size1, const index_t size2, const index_t size3) { size[0] = size0; size[1] = size1; size[2] = size2; size[3] = size3; offset_arr[5] = 1; offset_arr[4] = offset_arr[5] * block; offset_arr[3] = offset_arr[4] * size[3] / block; offset_arr[2] = offset_arr[3] * block; offset_arr[1] = offset_arr[2] * size[2] / block; offset_arr[0] = offset_arr[1] * size[1]; } }; template<typename xpu> void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); using namespace mxnet_op; int block = param.block_size; mshadow::Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s); char* workspace_curr_ptr = workspace.dptr_; index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr); index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6); MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch( s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1], in_data.shape_[2], in_data.shape_[3]); Kernel<space_to_depth_forward<req_type>, xpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(), block, size, offset_arr); }); }); } namespace split_enum { enum SplitOpInputs {kData}; } // namespace split_enum struct SplitParam : public dmlc::Parameter<SplitParam> { mxnet::TShape indices; int axis; bool squeeze_axis; int sections; DMLC_DECLARE_PARAMETER(SplitParam) { DMLC_DECLARE_FIELD(indices) .describe("Indices of splits. The elements should denote the boundaries at which the split" " is performed along the `axis`."); DMLC_DECLARE_FIELD(axis).set_default(1) .describe("Axis along which to split."); DMLC_DECLARE_FIELD(squeeze_axis).set_default(0) .describe("If true, removes the axis with length 1 from the shapes of the output arrays." " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1" " only along the `axis` on which it is split." " Also `squeeze_axis` can be set to ``true``" " only if ``input.shape[axis] == num_outputs``."); DMLC_DECLARE_FIELD(sections).set_default(0) .describe("Number of sections if equally split.
Default to 0 which means split by indices."); } }; // struct SplitParam inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) { mxnet::TShape indices(sections+1, -1); indices[0] = 0; int64_t section_size = ishape[axis] / sections; for (int i = 0; i < sections; ++i) { indices[i+1] = section_size * (i + 1); } return indices; } inline bool SplitOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); int dtype = (*in_attrs)[0]; CHECK_NE(dtype, -1) << "First input must have specified type"; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); out_attrs->clear(); int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim(); for (int i = 0; i < num_outputs; ++i) { out_attrs->push_back(dtype); } return true; } inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs, const int real_axis) { using namespace mshadow; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); mxnet::TShape dshape = in_attrs->at(split_enum::kData); mxnet::TShape ishape = in_attrs->at(split_enum::kData); const mxnet::TShape indices = (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices; int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim(); // Pre-compute squeezed output shape for future usage mxnet::TShape squeezed_dshape = dshape; for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) { squeezed_dshape[d] = squeezed_dshape[d+1]; } squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]); // Assign shape to every output for (int i = 0; i < num_outputs; ++i) { int start = indices[i]; int end = (i < num_outputs - 1) ? 
indices[i + 1] : ishape[real_axis]; if (ishape[real_axis] == 0U) { end = start; } else { CHECK(start <= end) << "start " << start << " is greater than end " << end << " for subarray " << i; CHECK(end <= ishape[real_axis]) << "end " << end << " exceeds the size of the axis " << ishape[real_axis]; } dshape[real_axis] = (end - start); if (param.squeeze_axis) { CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start; SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape); } else { SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape); } } mxnet::TShape back_calculate_dshape = ishape; back_calculate_dshape[real_axis] = 0; for (int d = 0; d < real_axis; ++d) { back_calculate_dshape[d] = (*out_attrs)[0][d]; } if (param.squeeze_axis) { back_calculate_dshape[real_axis] = num_outputs; } else { for (int i = 0; i < num_outputs; ++i) { back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis]; } } for (int d = real_axis + 1; d < ishape.ndim(); ++d) { if (param.squeeze_axis) { back_calculate_dshape[d] = (*out_attrs)[0][d - 1]; } else { back_calculate_dshape[d] = (*out_attrs)[0][d]; } } SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape); return true; } inline bool SplitOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { using namespace mshadow; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); mxnet::TShape dshape = in_attrs->at(split_enum::kData); if (!mxnet::ndim_is_known(dshape)) return false; if (param.axis >= 0) { CHECK_LT(param.axis, dshape.ndim()); } else { CHECK_GE(param.axis + dshape.ndim(), 0); } int real_axis = param.axis; if (real_axis < 0) { real_axis += dshape.ndim(); } return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis); } struct SplitKernel { /*! * \brief Map function for forward split_v2 operator * \param i global thread id * \param in_data ptr to input buffer * \param out_data ptr to ptr of outputs buffer * \param indices ptr to indices buffer * \param num_sections # of sections after split * \param axis_size size of axis to be split on * \param trailing_size step size within the data buffer of the axis to be split on */ template<typename DType> static MSHADOW_XINLINE void Map(size_t i, const DType *in_data, DType** out_data, const size_t* indices, const size_t num_sections, const size_t axis_size, const size_t trailing_size) { size_t idx = i / trailing_size % axis_size; size_t target = 0; for (size_t section = 0; section < num_sections && indices[section] <= idx; target = section++) {} DType* target_data = out_data[target]; const size_t mid_idx = idx - indices[target]; const size_t head_idx = i / (trailing_size * axis_size); const size_t tail_idx = i % trailing_size; const size_t section_size = indices[target + 1] - indices[target]; const size_t target_idx = head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx; target_data[target_idx] = in_data[i]; } }; struct ConcatenateKernel { /*!
* \brief Map function for backward split_v2 operator * \param i global thread id * \param out_grad ptr to ptr of out grads buffer * \param in_grad ptr to input grad buffer * \param indices ptr to indices buffer * \param num_sections # of sections after split * \param axis_size size of axis to be split on * \param trailing_size step size within the data buffer of the axis to be split on */ template<typename DType> static MSHADOW_XINLINE void Map(size_t i, DType** out_grad, DType* in_grad, const size_t* indices, const size_t num_sections, const size_t axis_size, const size_t trailing_size) { size_t idx = i / trailing_size % axis_size; size_t src = 0; for (size_t section = 0; section < num_sections && indices[section] <= idx; src = section++) {} DType* src_grad = out_grad[src]; const size_t mid_idx = idx - indices[src]; const size_t head_idx = i / (trailing_size * axis_size); const size_t tail_idx = i % trailing_size; const size_t section_size = indices[src + 1] - indices[src]; const size_t src_idx = head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx; in_grad[i] = src_grad[src_idx]; } }; template<typename xpu> inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs, const int real_axis) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& input_data = inputs[split_enum::kData]; size_t leading = 1, trailing = 1; CHECK_LT(real_axis, input_data.ndim()); size_t mid = input_data.shape_[real_axis]; for (int i = 0; i < real_axis; ++i) { leading *= input_data.shape_[i]; } for (int i = real_axis + 1; i < input_data.ndim(); ++i) { trailing *= input_data.shape_[i]; } size_t workspace_size = 0; const mxnet::TShape& ishape = input_data.shape_; const mxnet::TShape split_pts = (param.sections > 0) ?
GetSplitIndices(ishape, real_axis, param.sections) : param.indices; std::vector<size_t> indices; for (const auto& section : split_pts) { indices.push_back(section); } if (param.sections == 0) { indices.push_back(ishape[real_axis]); } workspace_size += indices.size() * sizeof(size_t); MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, { std::vector<DType*> output_data; for (const TBlob& data : outputs) { output_data.push_back(data.dptr<DType>()); } workspace_size += output_data.size() * sizeof(DType*); Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s); Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size())); Tensor<xpu, 1, size_t> indices_xpu_tensor( reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size())); Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size())); Tensor<xpu, 1, DType*> ptrs_xpu_tensor( reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)), Shape1(output_data.size())); mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s); mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s); Kernel<SplitKernel, xpu>::Launch( s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_, indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing); }); } template<typename xpu> inline void SplitOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim()); const TBlob& input_data = inputs[split_enum::kData]; int real_axis = param.axis; if (real_axis < 0) { real_axis += input_data.ndim(); } SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis); } template<typename xpu> inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs, const int real_axis) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); Stream<xpu> *s = ctx.get_stream<xpu>(); TBlob input_grad = outputs[split_enum::kData]; size_t leading = 1, trailing = 1; CHECK_LT(real_axis, input_grad.ndim()); size_t mid = input_grad.shape_[real_axis]; for (int i = 0; i < real_axis; ++i) { leading *= input_grad.shape_[i]; } for (int i = real_axis + 1; i < input_grad.ndim(); ++i) { trailing *= input_grad.shape_[i]; } size_t workspace_size = 0; const mxnet::TShape& ishape = input_grad.shape_; const mxnet::TShape split_pts = (param.sections > 0) ? 
GetSplitIndices(ishape, real_axis, param.sections) : param.indices; std::vector<size_t> indices; for (const auto& section : split_pts) { indices.push_back(section); } if (param.sections == 0) { indices.push_back(ishape[real_axis]); } workspace_size += indices.size() * sizeof(size_t); MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, { std::vector<DType*> out_grads; for (const TBlob& output_grad : inputs) { out_grads.push_back(output_grad.dptr<DType>()); } workspace_size += out_grads.size() * sizeof(DType*); Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s); Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size())); Tensor<xpu, 1, size_t> indices_xpu_tensor( reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size())); Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size())); Tensor<xpu, 1, DType*> ptrs_xpu_tensor( reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)), Shape1(inputs.size())); mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s); mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s); Kernel<ConcatenateKernel, xpu>::Launch( s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(), indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing); }); } template<typename xpu> inline void SplitOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim()) << "out grad vector size must match the output size"; CHECK_EQ(outputs.size(), 1U); int real_axis = param.axis; if (real_axis < 0) { real_axis += outputs[split_enum::kData].ndim(); } SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis); } inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) { const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); return (param.sections > 0) ? param.sections : param.indices.ndim(); } } // namespace op } // namespace mxnet namespace std { template<> struct hash<mxnet::op::TransposeParam> { size_t operator()(const mxnet::op::TransposeParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axes); return ret; } }; template<> struct hash<mxnet::op::ReshapeParam> { size_t operator()(const mxnet::op::ReshapeParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.target_shape); ret = dmlc::HashCombine(ret, val.keep_highest); ret = dmlc::HashCombine(ret, val.shape); ret = dmlc::HashCombine(ret, val.reverse); return ret; } }; template<> struct hash<mxnet::op::ExpandDimParam> { size_t operator()(const mxnet::op::ExpandDimParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axis); return ret; } }; } // namespace std #endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
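/* Illustration (added alongside the header above, not part of MXNet): a minimal,
 * self-contained sketch of the interleaved reshape/broadcast shape computation that
 * ReshapeInputOutputForTileOp performs, re-expressed with std::vector<int> so it can
 * be compiled and checked in isolation. The function name tile_shapes and the demo
 * main are hypothetical, introduced only for this sketch. */
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

/* Given an input shape and repetition counts, return the pair (reshaped input shape,
 * broadcast target shape): a length-1 axis is interleaved before every input
 * dimension, with the even slots carrying the reps and the odd slots the dims. */
static std::pair<std::vector<int>, std::vector<int> >
tile_shapes(const std::vector<int>& ishape, const std::vector<int>& reps) {
  const int ndim = static_cast<int>(std::max(ishape.size(), reps.size())) * 2;
  std::vector<int> bshape(ndim, 1), rshape(ndim, 1);
  int i1 = static_cast<int>(ishape.size()) - 1;
  int i2 = static_cast<int>(reps.size()) - 1;
  for (int i = ndim - 1; i >= 0; --i) {
    if ((i & 1) == 0) {
      bshape[i] = (i2 >= 0 ? reps[i2--] : 1);                // repetition count
    } else {
      rshape[i] = bshape[i] = (i1 >= 0 ? ishape[i1--] : 1);  // original dimension
    }
  }
  return std::make_pair(rshape, bshape);
}

int main() {
  // Worked example from the doc comment above: shape (2, 3, 4, 5), reps (2, 8, 9, 3)
  std::pair<std::vector<int>, std::vector<int> > p =
      tile_shapes(std::vector<int>{2, 3, 4, 5}, std::vector<int>{2, 8, 9, 3});
  for (int d : p.first)  std::printf("%d ", d);   // prints: 1 2 1 3 1 4 1 5
  std::printf("\n");
  for (int d : p.second) std::printf("%d ", d);   // prints: 2 2 8 3 9 4 3 5
  std::printf("\n");
  return 0;
}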
Surface_tools.c
/* Generated by Cython 0.27.3 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_27_3" #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 
0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current 
PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif 
#define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__Surface_tools #define __PYX_HAVE_API__Surface_tools #include <string.h> #include <stdio.h> #include "pythread.h" #include <stdlib.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static 
static const char *__pyx_f[] = {
  "Surface_tools.pyx",
  "stringsource",
  "type.pxd",
  "bool.pxd",
  "complex.pxd",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m)  (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* note: __GNUC_PATCHLEVEL__ is the predefined macro; the spelling without
   trailing underscores evaluates to 0 in #if and silently disables
   atomics on GCC 4.1.x */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
    !defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
    __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
    __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
    __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
    __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct
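/* Illustrative sketch (hypothetical helpers, compiled out): how the
   acquisition-count macros above behave when the GNU-atomics branch is
   active. Each live memoryview slice bumps the view's shared count
   atomically, so no PyThread lock is taken on this path; the *_locked
   fallbacks are used only when CYTHON_ATOMICS is 0. */
#if 0
static void acquire_slice_demo(__pyx_atomic_int *count) {
    __sync_fetch_and_add(count, 1);          /* atomic increment */
}
static __pyx_atomic_int_type release_slice_demo(__pyx_atomic_int *count) {
    /* returns the value *before* the decrement; the last releaser sees 1 */
    return __sync_fetch_and_sub(count, 1);
}
#endif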
__pyx_opt_args_13Surface_tools_reshape; /* "Surface_tools.pyx":139 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef reshape(sprite_, factor_=1.0): # <<<<<<<<<<<<<< * """ * RESHAPE ANIMATION OR IMAGE */ struct __pyx_opt_args_13Surface_tools_reshape { int __pyx_n; PyObject *factor_; }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":277 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":328 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":953 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":328 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":953 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* 
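/* Illustrative sketch (hypothetical caller, compiled out): how Cython's
   optional-argument structs work for cpdef reshape(sprite_, factor_=1.0).
   __pyx_n records how many of the optional slots that follow were
   explicitly supplied; callers with no optionals pass a NULL struct
   pointer instead. The reshape prototype itself is declared further down
   in this file. */
#if 0
static PyObject *call_reshape_with_factor_demo(PyObject *sprite, PyObject *factor) {
    struct __pyx_opt_args_13Surface_tools_reshape opt;
    opt.__pyx_n = 1;        /* one optional argument is present ...   */
    opt.factor_ = factor;   /* ... namely factor_, as a PyObject*     */
    return __pyx_f_13Surface_tools_reshape(sprite, 0, &opt);
}
#endif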
(*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int 
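/* Illustrative sketch (hypothetical function, compiled out): the shape of
   a RefNanny-instrumented function. With CYTHON_REFNANNY disabled, the
   macros above degrade to plain Py_INCREF/Py_DECREF or to nothing, so the
   instrumentation costs nothing in release builds. */
#if 0
static PyObject *refnanny_demo(PyObject *obj) {
    __Pyx_RefNannyDeclarations
    __Pyx_RefNannySetupContext("refnanny_demo", 0);  /* 0: GIL already held */
    __Pyx_INCREF(obj);              /* logged and checked when enabled */
    __Pyx_DECREF(obj);
    __Pyx_RefNannyFinishContext();  /* verifies INCREFs/DECREFs balance */
    Py_RETURN_NONE;
}
#endif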
__Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, 
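/* Illustrative sketch (hypothetical, compiled out): the try/except
   lowering these helpers support, as used by make_array_c_code() later in
   this file. The outer exception state is saved on entry to the try block
   and restored once a handler has dealt with the exception; the re-raise
   paths (omitted here) restore it through the same saved triple. */
#if 0
static void try_except_shape_demo(void) {
    __Pyx_PyThreadState_declare
    PyObject *t, *v, *tb;
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&t, &v, &tb);   /* remember outer exception state */
    /* ... translated body of the try block ... */
    __Pyx_ExceptionReset(t, v, tb);     /* handler finished: restore it */
}
#endif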
tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs)  (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8
#define __Pyx_MEMVIEW_DIRECT   1
#define __Pyx_MEMVIEW_PTR      2
#define __Pyx_MEMVIEW_FULL     4
#define __Pyx_MEMVIEW_CONTIG   8
#define __Pyx_MEMVIEW_STRIDED  16
#define __Pyx_MEMVIEW_FOLLOW   32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
                struct __pyx_memoryview_obj *memview,
                int ndim,
                __Pyx_memviewslice *memviewslice,
                int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
    const char* function_name);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
    (is_list ?
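/* Illustrative sketch (compiled out): the per-axis flags above are OR-ed
   into one access spec per dimension when a memoryview slice is
   validated. For the C-contiguous `unsigned char[:, :, ::1]`-style
   buffers this module slices (the "d_d_dc" converters declared below),
   a typical axes spec looks roughly like this: */
#if 0
static const int demo_axes_spec[3] = {
    __Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW,   /* axis 0: strided    */
    __Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW,   /* axis 1: strided    */
    __Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG    /* axis 2: contiguous */
};
#endif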
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* SetItemInt.proto */ #define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, int wraparound, int boundscheck); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
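/* Illustrative sketch (compiled out): what a list subscript such as
   `lst[i]` in the .pyx source expands through. The Py_ssize_t fast path
   indexes the list directly; the wraparound and boundscheck arguments are
   compiled in or out according to the @cython directives. Assumes the
   usual companion helpers (__Pyx_fits_Py_ssize_t, PyInt_FromSsize_t) from
   the generated preamble. */
#if 0
static PyObject *get_item_demo(PyObject *lst, Py_ssize_t i) {
    return __Pyx_GetItemInt_List(lst, i, Py_ssize_t, 1,
                                 PyInt_FromSsize_t, 1, 1, 1);
}
#endif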
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ 
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_unsigned_char(const char *itemp); static 
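/* Illustrative sketch (compiled out): the list helpers above push onto a
   list through the CPython list internals when spare capacity exists
   (and, for __Pyx_PyList_Append, the list is at least half full, a
   heuristic against shrinking mid-size lists), falling back to the public
   PyList_Append() otherwise. Both paths return 0 on success, -1 on
   error. */
#if 0
static int append_demo(PyObject *list, PyObject *item) {
    return __Pyx_PyList_Append(list, item);
}
#endif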
CYTHON_INLINE int __pyx_memview_set_unsigned_char(const char *itemp, PyObject *obj); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE unsigned char __Pyx_PyInt_As_unsigned_char(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_unsigned_char(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_unsigned_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject 
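/* Illustrative sketch (compiled out): the CIntFromPy conversions above
   report failure through the CPython convention of returning (type)-1
   with an exception set, so a -1 result is ambiguous until
   PyErr_Occurred() is checked. */
#if 0
static int int_from_py_demo(PyObject *obj, int *out) {
    int v = __Pyx_PyInt_As_int(obj);
    if (v == -1 && PyErr_Occurred())
        return 0;               /* overflow or wrong type */
    *out = v;
    return 1;
}
#endif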
*__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'cpython.version' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.exc' */ /* Module declarations from 'cpython.module' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'cpython.tuple' */ /* Module declarations from 'cpython.list' */ /* Module declarations from 'cpython.sequence' */ /* Module declarations from 'cpython.mapping' */ /* Module declarations from 'cpython.iterator' */ /* Module declarations from 'cpython.number' */ /* Module declarations from 'cpython.int' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.bool' */ static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; /* Module declarations from 'cpython.long' */ /* Module declarations from 'cpython.float' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.complex' */ static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; /* Module declarations from 'cpython.string' */ /* Module declarations from 'cpython.unicode' */ /* Module declarations from 'cpython.dict' */ /* Module declarations from 'cpython.instance' */ /* Module declarations from 'cpython.function' */ /* Module declarations from 'cpython.method' */ /* Module declarations from 'cpython.weakref' */ /* Module declarations from 'cpython.getargs' */ /* Module declarations from 'cpython.pythread' */ /* Module declarations from 'cpython.pystate' */ /* Module declarations from 'cpython.cobject' */ /* Module declarations from 'cpython.oldbuffer' */ /* Module declarations from 'cpython.set' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.bytes' */ /* Module declarations from 'cpython.pycapsule' */ /* Module declarations from 'cpython' */ /* Module declarations from 'Surface_tools' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; 
static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static PyObject *__pyx_f_13Surface_tools_make_transparent(PyObject *, int, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_f_13Surface_tools_reshape(PyObject *, int __pyx_skip_dispatch, struct __pyx_opt_args_13Surface_tools_reshape *__pyx_optional_args); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void 
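/* Illustrative sketch (hypothetical caller, compiled out): the cpdef
   entry points declared above are reachable both from Python and directly
   from C. Passing __pyx_skip_dispatch = 1 skips the Python-level dispatch
   check that cpdef functions otherwise perform and jumps straight to the
   C implementation. */
#if 0
static PyObject *fast_make_transparent_demo(PyObject *image, int alpha) {
    return __pyx_f_13Surface_tools_make_transparent(image, alpha, 1);
}
#endif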
__pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_unsigned_char = { "unsigned char", NULL, sizeof(unsigned char), { 0 }, 0, IS_UNSIGNED(unsigned char) ? 'U' : 'I', IS_UNSIGNED(unsigned char), 0 }; #define __Pyx_MODULE_NAME "Surface_tools" extern int __pyx_module_is_main_Surface_tools; int __pyx_module_is_main_Surface_tools = 0; /* Implementation of 'Surface_tools' */ static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_RGBA[] = "RGBA"; static const char __pyx_k_Rect[] = "Rect"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_copy[] = "copy"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mask[] = "mask"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_alpha[] = "alpha_"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_empty[] = "empty"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_image[] = "image"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_scale[] = "scale"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_uint8[] = "uint8"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_factor[] = "factor_"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_pygame[] = "pygame"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_rotate[] = "rotate"; static const char __pyx_k_sprite[] = "sprite_"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_HWACCEL[] = "HWACCEL"; static const char __pyx_k_Surface[] = "Surface"; static const char __pyx_k_Vector2[] = "Vector2"; static const char __pyx_k_array3d[] = "array3d"; static const char __pyx_k_asarray[] = "asarray"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_image_2[] = "image_"; static const char __pyx_k_memview[] = "memview"; 
static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_RLEACCEL[] = "RLEACCEL"; static const char __pyx_k_SRCALPHA[] = "SRCALPHA"; static const char __pyx_k_get_size[] = "get_size"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pixels3d[] = "pixels3d"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_get_width[] = "get_width"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_frombuffer[] = "frombuffer"; static const char __pyx_k_get_height[] = "get_height"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_array_alpha[] = "array_alpha"; static const char __pyx_k_pygame_math[] = "pygame.math"; static const char __pyx_k_smoothscale[] = "smoothscale"; static const char __pyx_k_pixels_alpha[] = "pixels_alpha"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_BLEND_RGB_ADD[] = "BLEND_RGB_ADD"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_Invalid_surface[] = "\nInvalid surface."; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pygame_surfarray[] = "pygame.surfarray"; static const char __pyx_k_pygame_transform[] = "pygame.transform"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_Array_shape_not_understood[] = "\nArray shape not understood."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_Argument_factor__must_be_float[] = "\nArgument factor_ must be float or int got %s "; static const char 
__pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Argument_factor__incorrect_type[] = "\nArgument factor_ incorrect type must be float, int or tuple got %s "; static const char __pyx_k_Argument_factor__must_be_a_list[] = "\nArgument factor_ must be a list or tuple got %s "; static const char __pyx_k_Pygame_library_is_missing_on_yo[] = "\n<Pygame> library is missing on your system.\nTry: \n C:\\pip install pygame on a window command prompt."; static const char __pyx_k_Surface_without_per_pixel_infor[] = "\nSurface without per-pixel information."; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Argument_factor__incorrect_type; static PyObject *__pyx_kp_s_Argument_factor__must_be_a_list; static PyObject *__pyx_kp_s_Argument_factor__must_be_float; static PyObject *__pyx_kp_s_Array_shape_not_understood; static PyObject *__pyx_n_s_BLEND_RGB_ADD; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_n_s_HWACCEL; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_kp_s_Invalid_surface; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_kp_s_Pygame_library_is_missing_on_yo; static PyObject *__pyx_n_s_RGBA; static PyObject *__pyx_n_s_RLEACCEL; static PyObject *__pyx_n_s_Rect; static PyObject *__pyx_n_s_SRCALPHA; static PyObject *__pyx_n_s_Surface; static PyObject *__pyx_kp_s_Surface_without_per_pixel_infor; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_Vector2; 
static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_alpha; static PyObject *__pyx_n_s_array3d; static PyObject *__pyx_n_s_array_alpha; static PyObject *__pyx_n_s_asarray; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_copy; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_empty; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_factor; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_frombuffer; static PyObject *__pyx_n_s_get_height; static PyObject *__pyx_n_s_get_size; static PyObject *__pyx_n_s_get_width; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_image; static PyObject *__pyx_n_s_image_2; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mask; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pixels3d; static PyObject *__pyx_n_s_pixels_alpha; static PyObject *__pyx_n_s_pygame; static PyObject *__pyx_n_s_pygame_math; static PyObject *__pyx_n_s_pygame_surfarray; static PyObject *__pyx_n_s_pygame_transform; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_rotate; static PyObject *__pyx_n_s_scale; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_smoothscale; static PyObject *__pyx_n_s_sprite; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_uint8; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject 
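/* The __pyx_n_s_* / __pyx_kp_s_* objects declared above are interned
   Python strings created once at module init; attribute and method
   lookups in the translated code reuse them instead of rebuilding a
   string on every call. Illustrative lookup, compiled out: */
#if 0
static PyObject *get_size_demo(PyObject *surface) {
    /* equivalent of `surface.get_size` in the .pyx source */
    return __Pyx_PyObject_GetAttrStr(surface, __pyx_n_s_get_size);
}
#endif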
*__pyx_pf_13Surface_tools_make_transparent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_image_, int __pyx_v_alpha_); /* proto */ static PyObject *__pyx_pf_13Surface_tools_2reshape(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sprite_, PyObject *__pyx_v_factor_); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct 
__pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject 
*__pyx_float_1_0; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_4; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_slice_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__18; static PyObject *__pyx_slice__19; static PyObject *__pyx_slice__20; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__27; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__30; static PyObject *__pyx_codeobj__31; /* "Surface_tools.pyx":42 * @cython.nonecheck(False) * @cython.cdivision(True) * cdef make_array_c_code(unsigned char[:, :, :] rgb_array_c, unsigned char[:, :] alpha_c): # <<<<<<<<<<<<<< * """ * STACK ARRAY RGB VALUES WITH ALPHA CHANNEL. */ static PyObject *__pyx_f_13Surface_tools_make_array_c_code(__Pyx_memviewslice __pyx_v_rgb_array_c, __Pyx_memviewslice __pyx_v_alpha_c) { int __pyx_v_width; int __pyx_v_height; CYTHON_UNUSED PyObject *__pyx_v_e = NULL; __Pyx_memviewslice __pyx_v_new_array = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; int __pyx_v_j; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *(*__pyx_t_8)(PyObject *); int __pyx_t_9; int __pyx_t_10; __Pyx_memviewslice __pyx_t_11 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; unsigned char __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; unsigned char __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; unsigned char __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; unsigned char __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; PyObject *__pyx_t_42 = NULL; __Pyx_RefNannySetupContext("make_array_c_code", 0); /* "Surface_tools.pyx":52 * """ * cdef int width, height * try: # <<<<<<<<<<<<<< * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "Surface_tools.pyx":53 * cdef int width, height * try: * width, height = (<object> rgb_array_c).shape[:2] # <<<<<<<<<<<<<< * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') 
*/ __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_v_rgb_array_c, 3, (PyObject *(*)(char *)) __pyx_memview_get_unsigned_char, (int (*)(char *, PyObject *)) __pyx_memview_set_unsigned_char, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_shape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_t_5, 0, 2, NULL, NULL, &__pyx_slice_, 0, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) { PyObject* sequence = __pyx_t_4; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 53, __pyx_L3_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_5 = PyList_GET_ITEM(sequence, 0); __pyx_t_6 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); #else __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); #endif __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { Py_ssize_t index = -1; __pyx_t_7 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext; index = 0; __pyx_t_5 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_5)) goto __pyx_L9_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L9_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) __PYX_ERR(0, 53, __pyx_L3_error) __pyx_t_8 = NULL; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L10_unpacking_done; __pyx_L9_unpacking_failed:; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_8 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 53, __pyx_L3_error) __pyx_L10_unpacking_done:; } __pyx_t_9 = __Pyx_PyInt_As_int(__pyx_t_5); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_6); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 53, __pyx_L3_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_width = __pyx_t_9; __pyx_v_height = __pyx_t_10; /* "Surface_tools.pyx":52 * """ * cdef int width, height * try: # <<<<<<<<<<<<<< * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /* 
"Surface_tools.pyx":54 * try: * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: # <<<<<<<<<<<<<< * raise ValueError('\nArray shape not understood.') * */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 54, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 54, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError) || __Pyx_PyErr_ExceptionMatches(__pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__pyx_t_10) { __Pyx_AddTraceback("Surface_tools.make_array_c_code", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_4, &__pyx_t_5) < 0) __PYX_ERR(0, 54, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); __pyx_v_e = __pyx_t_4; /* "Surface_tools.pyx":55 * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') # <<<<<<<<<<<<<< * * cdef: */ __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 55, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(0, 55, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "Surface_tools.pyx":52 * """ * cdef int width, height * try: # <<<<<<<<<<<<<< * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "Surface_tools.pyx":58 * * cdef: * unsigned char[:, :, ::1] new_array = empty((width, height, 4), dtype=uint8) # <<<<<<<<<<<<<< * int i=0, j=0 * # EQUIVALENT TO A NUMPY DSTACK */ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_empty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_width); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_height); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = PyTuple_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6); __Pyx_INCREF(__pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_t_7, 2, __pyx_int_4); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_uint8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, 
__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_11 = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(__pyx_t_4); if (unlikely(!__pyx_t_11.memview)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_new_array = __pyx_t_11; __pyx_t_11.memview = NULL; __pyx_t_11.data = NULL; /* "Surface_tools.pyx":59 * cdef: * unsigned char[:, :, ::1] new_array = empty((width, height, 4), dtype=uint8) * int i=0, j=0 # <<<<<<<<<<<<<< * # EQUIVALENT TO A NUMPY DSTACK * with nogil: */ __pyx_v_i = 0; __pyx_v_j = 0; /* "Surface_tools.pyx":61 * int i=0, j=0 * # EQUIVALENT TO A NUMPY DSTACK * with nogil: # <<<<<<<<<<<<<< * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "Surface_tools.pyx":62 * # EQUIVALENT TO A NUMPY DSTACK * with nogil: * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): # <<<<<<<<<<<<<< * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ */ __pyx_t_10 = __pyx_v_width; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_12 = (__pyx_t_10 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_12 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(8) private(__pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) schedule(static) #endif /* _OPENMP */ for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_12; __pyx_t_9++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_9); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "Surface_tools.pyx":63 * with nogil: * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): # <<<<<<<<<<<<<< * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ */ __pyx_t_13 = __pyx_v_height; for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { __pyx_v_j = __pyx_t_14; /* "Surface_tools.pyx":65 * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ # <<<<<<<<<<<<<< * rgb_array_c[i, j, 2], alpha_c[i, j] * return asarray(new_array) */ __pyx_t_15 = __pyx_v_i; __pyx_t_16 = __pyx_v_j; __pyx_t_17 = 0; __pyx_t_18 = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array_c.data + __pyx_t_15 * __pyx_v_rgb_array_c.strides[0]) ) + __pyx_t_16 * __pyx_v_rgb_array_c.strides[1]) ) + __pyx_t_17 * __pyx_v_rgb_array_c.strides[2]) ))); __pyx_t_19 = __pyx_v_i; __pyx_t_20 = __pyx_v_j; __pyx_t_21 = 1; __pyx_t_22 = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array_c.data + __pyx_t_19 * 
__pyx_v_rgb_array_c.strides[0]) ) + __pyx_t_20 * __pyx_v_rgb_array_c.strides[1]) ) + __pyx_t_21 * __pyx_v_rgb_array_c.strides[2]) ))); /* "Surface_tools.pyx":66 * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ * rgb_array_c[i, j, 2], alpha_c[i, j] # <<<<<<<<<<<<<< * return asarray(new_array) * */ __pyx_t_23 = __pyx_v_i; __pyx_t_24 = __pyx_v_j; __pyx_t_25 = 2; __pyx_t_26 = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array_c.data + __pyx_t_23 * __pyx_v_rgb_array_c.strides[0]) ) + __pyx_t_24 * __pyx_v_rgb_array_c.strides[1]) ) + __pyx_t_25 * __pyx_v_rgb_array_c.strides[2]) ))); __pyx_t_27 = __pyx_v_i; __pyx_t_28 = __pyx_v_j; __pyx_t_29 = (*((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_alpha_c.data + __pyx_t_27 * __pyx_v_alpha_c.strides[0]) ) + __pyx_t_28 * __pyx_v_alpha_c.strides[1]) ))); /* "Surface_tools.pyx":64 * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ # <<<<<<<<<<<<<< * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ * rgb_array_c[i, j, 2], alpha_c[i, j] */ __pyx_t_30 = __pyx_v_i; __pyx_t_31 = __pyx_v_j; __pyx_t_32 = 0; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_30 * __pyx_v_new_array.strides[0]) ) + __pyx_t_31 * __pyx_v_new_array.strides[1]) )) + __pyx_t_32)) )) = __pyx_t_18; __pyx_t_33 = __pyx_v_i; __pyx_t_34 = __pyx_v_j; __pyx_t_35 = 1; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_33 * __pyx_v_new_array.strides[0]) ) + __pyx_t_34 * __pyx_v_new_array.strides[1]) )) + __pyx_t_35)) )) = __pyx_t_22; __pyx_t_36 = __pyx_v_i; __pyx_t_37 = __pyx_v_j; __pyx_t_38 = 2; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_36 * __pyx_v_new_array.strides[0]) ) + __pyx_t_37 * __pyx_v_new_array.strides[1]) )) + __pyx_t_38)) )) = __pyx_t_26; /* "Surface_tools.pyx":65 * for j in range(height): * new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \ * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ # <<<<<<<<<<<<<< * rgb_array_c[i, j, 2], alpha_c[i, j] * return asarray(new_array) */ __pyx_t_39 = __pyx_v_i; __pyx_t_40 = __pyx_v_j; __pyx_t_41 = 3; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_39 * __pyx_v_new_array.strides[0]) ) + __pyx_t_40 * __pyx_v_new_array.strides[1]) )) + __pyx_t_41)) )) = __pyx_t_29; } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "Surface_tools.pyx":61 * int i=0, j=0 * # EQUIVALENT TO A NUMPY DSTACK * with nogil: # <<<<<<<<<<<<<< * for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(height): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L15; } __pyx_L15:; } } /* "Surface_tools.pyx":67 * new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \ * rgb_array_c[i, j, 2], alpha_c[i, j] * return asarray(new_array) # <<<<<<<<<<<<<< * * */ 
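/* Consolidated Surface_tools.pyx source for make_array_c_code, reconstructed
 * from the fragments quoted in the generated blocks above. This is a sketch,
 * not the verbatim module: the docstring is abridged, and SCHEDULE,
 * THREAD_NUMBER, empty, uint8 and asarray are module-level constants/imports
 * of Surface_tools.pyx that are not visible here. The OpenMP pragmas emitted
 * above show that this build fixes them to schedule(static) with 8 threads.
 *
 *     @cython.nonecheck(False)
 *     @cython.cdivision(True)
 *     cdef make_array_c_code(unsigned char[:, :, :] rgb_array_c, unsigned char[:, :] alpha_c):
 *         """STACK ARRAY RGB VALUES WITH ALPHA CHANNEL."""   # docstring abridged
 *         cdef int width, height
 *         try:
 *             width, height = (<object> rgb_array_c).shape[:2]
 *         except (ValueError, pygame.error) as e:
 *             raise ValueError('\nArray shape not understood.')
 *
 *         cdef:
 *             unsigned char[:, :, ::1] new_array = empty((width, height, 4), dtype=uint8)
 *             int i=0, j=0
 *         # EQUIVALENT TO A NUMPY DSTACK
 *         with nogil:
 *             for i in prange(width, schedule=SCHEDULE, num_threads=THREAD_NUMBER):
 *                 for j in range(height):
 *                     new_array[i, j, 0], new_array[i, j, 1], new_array[i, j, 2], \
 *                     new_array[i, j, 3] = rgb_array_c[i, j, 0], rgb_array_c[i, j, 1], \
 *                                          rgb_array_c[i, j, 2], alpha_c[i, j]
 *         return asarray(new_array)
 */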
__Pyx_XDECREF(__pyx_r); __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_asarray); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_new_array, 3, (PyObject *(*)(char *)) __pyx_memview_get_unsigned_char, (int (*)(char *, PyObject *)) __pyx_memview_set_unsigned_char, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_5) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_6}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_6}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_42 = PyTuple_New(1+1); if (unlikely(!__pyx_t_42)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_42); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_42, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_42, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_42, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_42); __pyx_t_42 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "Surface_tools.pyx":42 * @cython.nonecheck(False) * @cython.cdivision(True) * cdef make_array_c_code(unsigned char[:, :, :] rgb_array_c, unsigned char[:, :] alpha_c): # <<<<<<<<<<<<<< * """ * STACK ARRAY RGB VALUES WITH ALPHA CHANNEL. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __PYX_XDEC_MEMVIEW(&__pyx_t_11, 1); __Pyx_XDECREF(__pyx_t_42); __Pyx_AddTraceback("Surface_tools.make_array_c_code", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_e); __PYX_XDEC_MEMVIEW(&__pyx_v_new_array, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "Surface_tools.pyx":75 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef make_transparent(image_, int alpha_): # <<<<<<<<<<<<<< * """ * MODIFY TRANSPARENCY TO A PYGAME SURFACE */ static PyObject *__pyx_pw_13Surface_tools_1make_transparent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_13Surface_tools_make_transparent(PyObject *__pyx_v_image_, int __pyx_v_alpha_, CYTHON_UNUSED int __pyx_skip_dispatch) { PyObject *__pyx_v_rgb = NULL; PyObject *__pyx_v_alpha = NULL; int __pyx_v_w; int __pyx_v_h; __Pyx_memviewslice __pyx_v_new_array = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_alpha_array = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_rgb_array = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; int __pyx_v_j; int __pyx_v_a; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *(*__pyx_t_9)(PyObject *); int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_memviewslice __pyx_t_12 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_13 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_14 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; int __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; PyObject *__pyx_t_42 = NULL; __Pyx_RefNannySetupContext("make_transparent", 0); /* "Surface_tools.pyx":96 * # raise TypeError("\nSurface without per-pixel information.") * * try: # <<<<<<<<<<<<<< * rgb = pixels3d(image_) * except (pygame.error, ValueError): */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "Surface_tools.pyx":97 * * try: * rgb = pixels3d(image_) # <<<<<<<<<<<<<< * except (pygame.error, ValueError): * raise ValueError('\nInvalid surface.') */ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_pixels3d); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } if (!__pyx_t_6) { __pyx_t_4 = 
__Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_image_); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v_image_}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v_image_}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 97, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_INCREF(__pyx_v_image_); __Pyx_GIVEREF(__pyx_v_image_); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_v_image_); __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_rgb = __pyx_t_4; __pyx_t_4 = 0; /* "Surface_tools.pyx":96 * # raise TypeError("\nSurface without per-pixel information.") * * try: # <<<<<<<<<<<<<< * rgb = pixels3d(image_) * except (pygame.error, ValueError): */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":98 * try: * rgb = pixels3d(image_) * except (pygame.error, ValueError): # <<<<<<<<<<<<<< * raise ValueError('\nInvalid surface.') * */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 98, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_error); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 98, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_5) || __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_4, &__pyx_t_7) < 0) __PYX_ERR(0, 98, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_7); /* "Surface_tools.pyx":99 * rgb = pixels3d(image_) * except (pygame.error, ValueError): * raise ValueError('\nInvalid surface.') # <<<<<<<<<<<<<< * * try: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 99, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "Surface_tools.pyx":96 * # raise TypeError("\nSurface without per-pixel information.") * * try: # <<<<<<<<<<<<<< * rgb = pixels3d(image_) * except (pygame.error, ValueError): */ __Pyx_XGIVEREF(__pyx_t_1); 
__Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "Surface_tools.pyx":101 * raise ValueError('\nInvalid surface.') * * try: # <<<<<<<<<<<<<< * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_1); /*try:*/ { /* "Surface_tools.pyx":102 * * try: * alpha = pixels_alpha(image_) # <<<<<<<<<<<<<< * except (pygame.error, ValueError): * raise ValueError('\nSurface without per-pixel information.') */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pixels_alpha); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 102, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_image_); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 102, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_7); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_v_image_}; __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 102, __pyx_L11_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_v_image_}; __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 102, __pyx_L11_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_7); } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 102, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_v_image_); __Pyx_GIVEREF(__pyx_v_image_); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_v_image_); __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 102, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_alpha = __pyx_t_7; __pyx_t_7 = 0; /* "Surface_tools.pyx":101 * raise ValueError('\nInvalid surface.') * * try: # <<<<<<<<<<<<<< * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L16_try_end; __pyx_L11_error:; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "Surface_tools.pyx":103 * try: * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): # <<<<<<<<<<<<<< * raise ValueError('\nSurface without per-pixel information.') * */ __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 103, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_error); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 103, __pyx_L13_except_error) 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_4) || __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_7, &__pyx_t_6) < 0) __PYX_ERR(0, 103, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_6); /* "Surface_tools.pyx":104 * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): * raise ValueError('\nSurface without per-pixel information.') # <<<<<<<<<<<<<< * * cdef int w, h */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 104, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 104, __pyx_L13_except_error) } goto __pyx_L13_except_error; __pyx_L13_except_error:; /* "Surface_tools.pyx":101 * raise ValueError('\nInvalid surface.') * * try: # <<<<<<<<<<<<<< * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_1); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1); goto __pyx_L1_error; __pyx_L16_try_end:; } /* "Surface_tools.pyx":107 * * cdef int w, h * w, h = image_.get_size() # <<<<<<<<<<<<<< * * # REMOVE FOR SPEED */ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_image_, __pyx_n_s_get_size); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (__pyx_t_4) { __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { __pyx_t_6 = __Pyx_PyObject_CallNoArg(__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 107, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_6))) || (PyList_CheckExact(__pyx_t_6))) { PyObject* sequence = __pyx_t_6; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 107, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_7 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_7 = PyList_GET_ITEM(sequence, 0); __pyx_t_4 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_7 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else { Py_ssize_t index = -1; __pyx_t_5 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
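/* Consolidated Surface_tools.pyx source for the prologue of make_transparent
 * (signature, docstring and input guards), reconstructed from the fragments
 * quoted above. pixels3d and pixels_alpha are pygame.surfarray accessors that
 * reference the surface's pixel data directly; pixels_alpha fails on surfaces
 * without per-pixel alpha, which the second guard converts into a ValueError.
 *
 *     @cython.nonecheck(False)
 *     @cython.cdivision(True)
 *     cpdef make_transparent(image_, int alpha_):
 *         """
 *         MODIFY TRANSPARENCY TO A PYGAME SURFACE
 *
 *         :param image_: Surface; pygame.Surface to modify
 *         :param alpha_: int; integer value representing the new alpha value
 *         :return: Surface with new alpha value
 *         """
 *         # raise TypeError("\nSurface without per-pixel information.")
 *         try:
 *             rgb = pixels3d(image_)
 *         except (pygame.error, ValueError):
 *             raise ValueError('\nInvalid surface.')
 *
 *         try:
 *             alpha = pixels_alpha(image_)
 *         except (pygame.error, ValueError):
 *             raise ValueError('\nSurface without per-pixel information.')
 *
 *         cdef int w, h
 *         w, h = image_.get_size()
 */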
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_9 = Py_TYPE(__pyx_t_5)->tp_iternext; index = 0; __pyx_t_7 = __pyx_t_9(__pyx_t_5); if (unlikely(!__pyx_t_7)) goto __pyx_L19_unpacking_failed; __Pyx_GOTREF(__pyx_t_7); index = 1; __pyx_t_4 = __pyx_t_9(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L19_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); if (__Pyx_IternextUnpackEndCheck(__pyx_t_9(__pyx_t_5), 2) < 0) __PYX_ERR(0, 107, __pyx_L1_error) __pyx_t_9 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L20_unpacking_done; __pyx_L19_unpacking_failed:; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_9 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 107, __pyx_L1_error) __pyx_L20_unpacking_done:; } __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_7); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 107, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_w = __pyx_t_8; __pyx_v_h = __pyx_t_10; /* "Surface_tools.pyx":115 * * cdef: * unsigned char [:, :, ::1] new_array = numpy.empty((h, w, 4), dtype=numpy.uint8) # <<<<<<<<<<<<<< * unsigned char [:, :] alpha_array = alpha * unsigned char [:, :, :] rgb_array = rgb */ __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_h); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_w); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_7); __Pyx_INCREF(__pyx_int_4); __Pyx_GIVEREF(__pyx_int_4); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_int_4); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_uint8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_11) < 0) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_12 = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(__pyx_t_11); if (unlikely(!__pyx_t_12.memview)) __PYX_ERR(0, 115, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_v_new_array = __pyx_t_12; __pyx_t_12.memview = NULL; __pyx_t_12.data = NULL; /* "Surface_tools.pyx":116 * cdef: * unsigned char [:, :, ::1] new_array = numpy.empty((h, w, 4), dtype=numpy.uint8) * unsigned char [:, :] alpha_array = alpha # <<<<<<<<<<<<<< * unsigned char [:, :, :] rgb_array = rgb * int i=0, j=0, a */ __pyx_t_13 = __Pyx_PyObject_to_MemoryviewSlice_dsds_unsigned_char(__pyx_v_alpha); if (unlikely(!__pyx_t_13.memview)) __PYX_ERR(0, 116, __pyx_L1_error) __pyx_v_alpha_array = __pyx_t_13; __pyx_t_13.memview = NULL; __pyx_t_13.data = NULL; /* "Surface_tools.pyx":117 * unsigned char [:, :, ::1] new_array = numpy.empty((h, w, 4), dtype=numpy.uint8) * unsigned char [:, :] alpha_array = alpha * unsigned char [:, :, :] rgb_array = rgb # <<<<<<<<<<<<<< * int i=0, j=0, a * */ __pyx_t_14 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_unsigned_char(__pyx_v_rgb); if (unlikely(!__pyx_t_14.memview)) __PYX_ERR(0, 117, __pyx_L1_error) __pyx_v_rgb_array = __pyx_t_14; __pyx_t_14.memview = NULL; __pyx_t_14.data = NULL; /* "Surface_tools.pyx":118 * unsigned char [:, :] alpha_array = alpha * unsigned char [:, :, :] rgb_array = rgb * int i=0, j=0, a # <<<<<<<<<<<<<< * * with nogil: */ __pyx_v_i = 0; __pyx_v_j = 0; /* "Surface_tools.pyx":120 * int i=0, j=0, a * * with nogil: # <<<<<<<<<<<<<< * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "Surface_tools.pyx":122 * with nogil: * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): # <<<<<<<<<<<<<< * for j in range(h): * new_array[j, i, 0] = rgb_array[i, j, 0] */ __pyx_t_10 = __pyx_v_w; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_15 = (__pyx_t_10 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_15 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(8) private(__pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_a) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) schedule(static) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_15; __pyx_t_8++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_8); /* Initialize private variables to invalid values */ __pyx_v_a = ((int)0xbad0bad0); __pyx_v_j = ((int)0xbad0bad0); /* "Surface_tools.pyx":123 * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(h): # <<<<<<<<<<<<<< * new_array[j, i, 0] = rgb_array[i, j, 0] * new_array[j, i, 1] = rgb_array[i, j, 1] */ __pyx_t_16 = __pyx_v_h; for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "Surface_tools.pyx":124 * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): * for j in range(h): * new_array[j, i, 0] = rgb_array[i, j, 0] # <<<<<<<<<<<<<< * new_array[j, i, 1] = rgb_array[i, j, 1] * new_array[j, i, 2] = rgb_array[i, j, 2] */ __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_j; __pyx_t_20 = 0; __pyx_t_21 = __pyx_v_j; __pyx_t_22 = __pyx_v_i; __pyx_t_23 = 
0; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_21 * __pyx_v_new_array.strides[0]) ) + __pyx_t_22 * __pyx_v_new_array.strides[1]) )) + __pyx_t_23)) )) = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array.data + __pyx_t_18 * __pyx_v_rgb_array.strides[0]) ) + __pyx_t_19 * __pyx_v_rgb_array.strides[1]) ) + __pyx_t_20 * __pyx_v_rgb_array.strides[2]) ))); /* "Surface_tools.pyx":125 * for j in range(h): * new_array[j, i, 0] = rgb_array[i, j, 0] * new_array[j, i, 1] = rgb_array[i, j, 1] # <<<<<<<<<<<<<< * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ */ __pyx_t_24 = __pyx_v_i; __pyx_t_25 = __pyx_v_j; __pyx_t_26 = 1; __pyx_t_27 = __pyx_v_j; __pyx_t_28 = __pyx_v_i; __pyx_t_29 = 1; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_27 * __pyx_v_new_array.strides[0]) ) + __pyx_t_28 * __pyx_v_new_array.strides[1]) )) + __pyx_t_29)) )) = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array.data + __pyx_t_24 * __pyx_v_rgb_array.strides[0]) ) + __pyx_t_25 * __pyx_v_rgb_array.strides[1]) ) + __pyx_t_26 * __pyx_v_rgb_array.strides[2]) ))); /* "Surface_tools.pyx":126 * new_array[j, i, 0] = rgb_array[i, j, 0] * new_array[j, i, 1] = rgb_array[i, j, 1] * new_array[j, i, 2] = rgb_array[i, j, 2] # <<<<<<<<<<<<<< * a = alpha_array[i, j] - alpha_ * if a < 0: */ __pyx_t_30 = __pyx_v_i; __pyx_t_31 = __pyx_v_j; __pyx_t_32 = 2; __pyx_t_33 = __pyx_v_j; __pyx_t_34 = __pyx_v_i; __pyx_t_35 = 2; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_33 * __pyx_v_new_array.strides[0]) ) + __pyx_t_34 * __pyx_v_new_array.strides[1]) )) + __pyx_t_35)) )) = (*((unsigned char *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_rgb_array.data + __pyx_t_30 * __pyx_v_rgb_array.strides[0]) ) + __pyx_t_31 * __pyx_v_rgb_array.strides[1]) ) + __pyx_t_32 * __pyx_v_rgb_array.strides[2]) ))); /* "Surface_tools.pyx":127 * new_array[j, i, 1] = rgb_array[i, j, 1] * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ # <<<<<<<<<<<<<< * if a < 0: * a = 0 */ __pyx_t_36 = __pyx_v_i; __pyx_t_37 = __pyx_v_j; __pyx_v_a = ((*((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_alpha_array.data + __pyx_t_36 * __pyx_v_alpha_array.strides[0]) ) + __pyx_t_37 * __pyx_v_alpha_array.strides[1]) ))) - __pyx_v_alpha_); /* "Surface_tools.pyx":128 * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ * if a < 0: # <<<<<<<<<<<<<< * a = 0 * new_array[j, i, 3] = a */ __pyx_t_38 = ((__pyx_v_a < 0) != 0); if (__pyx_t_38) { /* "Surface_tools.pyx":129 * a = alpha_array[i, j] - alpha_ * if a < 0: * a = 0 # <<<<<<<<<<<<<< * new_array[j, i, 3] = a * */ __pyx_v_a = 0; /* "Surface_tools.pyx":128 * new_array[j, i, 2] = rgb_array[i, j, 2] * a = alpha_array[i, j] - alpha_ * if a < 0: # <<<<<<<<<<<<<< * a = 0 * new_array[j, i, 3] = a */ } /* "Surface_tools.pyx":130 * if a < 0: * a = 0 * new_array[j, i, 3] = a # <<<<<<<<<<<<<< * * return pygame.image.frombuffer(new_array, (w, h), 'RGBA') */ __pyx_t_39 = __pyx_v_j; __pyx_t_40 = __pyx_v_i; __pyx_t_41 = 3; *((unsigned char *) ( /* dim=2 */ ((char *) (((unsigned char *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_new_array.data + __pyx_t_39 * __pyx_v_new_array.strides[0]) ) + __pyx_t_40 * __pyx_v_new_array.strides[1]) )) + __pyx_t_41)) )) = __pyx_v_a; } } } } } } #if 
((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "Surface_tools.pyx":120 * int i=0, j=0, a * * with nogil: # <<<<<<<<<<<<<< * * for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L23; } __pyx_L23:; } } /* "Surface_tools.pyx":132 * new_array[j, i, 3] = a * * return pygame.image.frombuffer(new_array, (w, h), 'RGBA') # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_pygame); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_image); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_frombuffer); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __pyx_memoryview_fromslice(__pyx_v_new_array, 3, (PyObject *(*)(char *)) __pyx_memview_get_unsigned_char, (int (*)(char *, PyObject *)) __pyx_memview_set_unsigned_char, 0);; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_w); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_h); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_42 = PyTuple_New(2); if (unlikely(!__pyx_t_42)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_42); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_42, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_42, 1, __pyx_t_6); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_t_6 = NULL; __pyx_t_15 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_15 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_6, __pyx_t_7, __pyx_t_42, __pyx_n_s_RGBA}; __pyx_t_11 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_15, 3+__pyx_t_15); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_42); __pyx_t_42 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[4] = {__pyx_t_6, __pyx_t_7, __pyx_t_42, __pyx_n_s_RGBA}; __pyx_t_11 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_15, 3+__pyx_t_15); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_42); __pyx_t_42 = 0; } else #endif { __pyx_t_4 = PyTuple_New(3+__pyx_t_15); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_GIVEREF(__pyx_t_7); 
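/* Consolidated Surface_tools.pyx source for the body of make_transparent,
 * reconstructed from the fragments quoted above. Two points the docstring
 * glosses over: alpha_ is subtracted from each pixel's existing alpha and
 * clamped at 0, so it acts as a per-pixel decrement rather than an absolute
 * alpha value; and the (j, i) indexing transposes the (w, h) surfarray layout
 * into the (h, w, 4) row-major buffer that pygame.image.frombuffer expects
 * for an 'RGBA' surface of size (w, h).
 *
 *         cdef:
 *             unsigned char [:, :, ::1] new_array = numpy.empty((h, w, 4), dtype=numpy.uint8)
 *             unsigned char [:, :] alpha_array = alpha
 *             unsigned char [:, :, :] rgb_array = rgb
 *             int i=0, j=0, a
 *
 *         with nogil:
 *             for i in prange(w, schedule=SCHEDULE, num_threads=THREAD_NUMBER):
 *                 for j in range(h):
 *                     new_array[j, i, 0] = rgb_array[i, j, 0]
 *                     new_array[j, i, 1] = rgb_array[i, j, 1]
 *                     new_array[j, i, 2] = rgb_array[i, j, 2]
 *                     a = alpha_array[i, j] - alpha_
 *                     if a < 0:
 *                         a = 0
 *                     new_array[j, i, 3] = a
 *
 *         return pygame.image.frombuffer(new_array, (w, h), 'RGBA')
 */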
PyTuple_SET_ITEM(__pyx_t_4, 0+__pyx_t_15, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_42); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_15, __pyx_t_42); __Pyx_INCREF(__pyx_n_s_RGBA); __Pyx_GIVEREF(__pyx_n_s_RGBA); PyTuple_SET_ITEM(__pyx_t_4, 2+__pyx_t_15, __pyx_n_s_RGBA); __pyx_t_7 = 0; __pyx_t_42 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_11; __pyx_t_11 = 0; goto __pyx_L0; /* "Surface_tools.pyx":75 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef make_transparent(image_, int alpha_): # <<<<<<<<<<<<<< * """ * MODIFY TRANSPARENCY TO A PYGAME SURFACE */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __PYX_XDEC_MEMVIEW(&__pyx_t_12, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_13, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_14, 1); __Pyx_XDECREF(__pyx_t_42); __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_rgb); __Pyx_XDECREF(__pyx_v_alpha); __PYX_XDEC_MEMVIEW(&__pyx_v_new_array, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_alpha_array, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_rgb_array, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_13Surface_tools_1make_transparent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_13Surface_tools_make_transparent[] = "\n MODIFY TRANSPARENCY TO A PYGAME SURFACE \n \n :param image_: Surface; pygame.Surface to modify \n :param alpha_: int; integer value representing the new alpha value \n :return: Surface with new alpha value\n "; static PyObject *__pyx_pw_13Surface_tools_1make_transparent(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_image_ = 0; int __pyx_v_alpha_; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("make_transparent (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_image_2,&__pyx_n_s_alpha,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_image_2)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_transparent", 1, 2, 2, 1); __PYX_ERR(0, 75, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "make_transparent") < 0)) __PYX_ERR(0, 75, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_image_ = values[0]; __pyx_v_alpha_ = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_alpha_ == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, 
__pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("make_transparent", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 75, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_13Surface_tools_make_transparent(__pyx_self, __pyx_v_image_, __pyx_v_alpha_); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_13Surface_tools_make_transparent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_image_, int __pyx_v_alpha_) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("make_transparent", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_13Surface_tools_make_transparent(__pyx_v_image_, __pyx_v_alpha_, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("Surface_tools.make_transparent", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "Surface_tools.pyx":139 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef reshape(sprite_, factor_=1.0): # <<<<<<<<<<<<<< * """ * RESHAPE ANIMATION OR IMAGE */ static PyObject *__pyx_pw_13Surface_tools_3reshape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_13Surface_tools_reshape(PyObject *__pyx_v_sprite_, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_13Surface_tools_reshape *__pyx_optional_args) { PyObject *__pyx_v_factor_ = ((PyObject *)__pyx_float_1_0); float __pyx_v_f_factor_; PyObject *__pyx_v_t_factor_ = 0; int __pyx_v_i; int __pyx_v_c1; int __pyx_v_c2; PyObject *__pyx_v_sprite_copy = 0; PyObject *__pyx_v_surface = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; float __pyx_t_7; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; Py_ssize_t __pyx_t_13; PyObject *(*__pyx_t_14)(PyObject *); PyObject *__pyx_t_15 = NULL; __Pyx_RefNannySetupContext("reshape", 0); if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_factor_ = __pyx_optional_args->factor_; } } /* "Surface_tools.pyx":152 * tuple t_factor_ * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * # FLOAT OR INT * try: */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_2 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":154 * if PyObject_IsInstance(factor_, (float, int)): * # FLOAT OR INT * try: # 
<<<<<<<<<<<<<< * f_factor_ = <float>factor_ * if f_factor_ == 1.0: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_6); /*try:*/ { /* "Surface_tools.pyx":155 * # FLOAT OR INT * try: * f_factor_ = <float>factor_ # <<<<<<<<<<<<<< * if f_factor_ == 1.0: * return sprite_ */ __pyx_t_7 = __pyx_PyFloat_AsFloat(__pyx_v_factor_); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L4_error) __pyx_v_f_factor_ = ((float)__pyx_t_7); /* "Surface_tools.pyx":156 * try: * f_factor_ = <float>factor_ * if f_factor_ == 1.0: # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ __pyx_t_3 = ((__pyx_v_f_factor_ == 1.0) != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":157 * f_factor_ = <float>factor_ * if f_factor_ == 1.0: * return sprite_ # <<<<<<<<<<<<<< * except ValueError: * raise ValueError('\nArgument factor_ must be float or int got %s ' % type(factor_)) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_sprite_); __pyx_r = __pyx_v_sprite_; goto __pyx_L8_try_return; /* "Surface_tools.pyx":156 * try: * f_factor_ = <float>factor_ * if f_factor_ == 1.0: # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ } /* "Surface_tools.pyx":154 * if PyObject_IsInstance(factor_, (float, int)): * # FLOAT OR INT * try: # <<<<<<<<<<<<<< * f_factor_ = <float>factor_ * if f_factor_ == 1.0: */ } __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "Surface_tools.pyx":158 * if f_factor_ == 1.0: * return sprite_ * except ValueError: # <<<<<<<<<<<<<< * raise ValueError('\nArgument factor_ must be float or int got %s ' % type(factor_)) * # TUPLE */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_8) { __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_9, &__pyx_t_10) < 0) __PYX_ERR(0, 158, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_10); /* "Surface_tools.pyx":159 * return sprite_ * except ValueError: * raise ValueError('\nArgument factor_ must be float or int got %s ' % type(factor_)) # <<<<<<<<<<<<<< * # TUPLE * else: */ __pyx_t_11 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__must_be_float, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 159, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 159, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_12, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 159, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(0, 159, __pyx_L6_except_error) } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "Surface_tools.pyx":154 * if PyObject_IsInstance(factor_, (float, int)): * # FLOAT OR INT * try: # <<<<<<<<<<<<<< * f_factor_ = <float>factor_ * if f_factor_ == 1.0: */ __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L1_error; 
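/* Consolidated Surface_tools.pyx source for the scalar branch of reshape,
 * reconstructed from the fragments quoted above. The cdef block is inferred
 * from the generated locals (float __pyx_v_f_factor_, tuple t_factor_) and
 * the docstring is abridged. A factor_ of exactly 1.0 is a no-op, so the
 * original sprite_ is returned without copying.
 *
 *     @cython.nonecheck(False)
 *     @cython.cdivision(True)
 *     cpdef reshape(sprite_, factor_=1.0):
 *         """RESHAPE ANIMATION OR IMAGE"""   # docstring abridged
 *         cdef:
 *             float f_factor_
 *             tuple t_factor_
 *
 *         if PyObject_IsInstance(factor_, (float, int)):
 *             # FLOAT OR INT
 *             try:
 *                 f_factor_ = <float>factor_
 *                 if f_factor_ == 1.0:
 *                     return sprite_
 *             except ValueError:
 *                 raise ValueError('\nArgument factor_ must be float or int got %s ' % type(factor_))
 */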
__pyx_L8_try_return:; __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L0; __pyx_L9_try_end:; } /* "Surface_tools.pyx":152 * tuple t_factor_ * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * # FLOAT OR INT * try: */ goto __pyx_L3; } /* "Surface_tools.pyx":162 * # TUPLE * else: * try: # <<<<<<<<<<<<<< * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): */ /*else*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_5, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "Surface_tools.pyx":163 * else: * try: * t_factor_ = tuple(factor_) # <<<<<<<<<<<<<< * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): * return sprite_ */ __pyx_t_10 = __Pyx_PySequence_Tuple(__pyx_v_factor_); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 163, __pyx_L13_error) __Pyx_GOTREF(__pyx_t_10); __pyx_v_t_factor_ = ((PyObject*)__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":164 * try: * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ __pyx_t_7 = __pyx_PyFloat_AsFloat(PyTuple_GET_ITEM(__pyx_v_t_factor_, 0)); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L13_error) __pyx_t_2 = ((((float)__pyx_t_7) == 0.0) != 0); if (__pyx_t_2) { } else { __pyx_t_3 = __pyx_t_2; goto __pyx_L20_bool_binop_done; } __pyx_t_7 = __pyx_PyFloat_AsFloat(PyTuple_GET_ITEM(__pyx_v_t_factor_, 1)); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L13_error) __pyx_t_2 = ((((float)__pyx_t_7) == 0.0) != 0); __pyx_t_3 = __pyx_t_2; __pyx_L20_bool_binop_done:; if (__pyx_t_3) { /* "Surface_tools.pyx":165 * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): * return sprite_ # <<<<<<<<<<<<<< * except ValueError: * raise ValueError('\nArgument factor_ must be a list or tuple got %s ' % type(factor_)) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_sprite_); __pyx_r = __pyx_v_sprite_; goto __pyx_L17_try_return; /* "Surface_tools.pyx":164 * try: * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): # <<<<<<<<<<<<<< * return sprite_ * except ValueError: */ } /* "Surface_tools.pyx":162 * # TUPLE * else: * try: # <<<<<<<<<<<<<< * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): */ } __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L18_try_end; __pyx_L13_error:; __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":166 * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): * return sprite_ * except ValueError: # <<<<<<<<<<<<<< * raise ValueError('\nArgument factor_ must be a list or tuple got %s ' % type(factor_)) * */ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_8) { __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_10, &__pyx_t_9, &__pyx_t_1) < 0) __PYX_ERR(0, 166, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_10); 
__Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_1); /* "Surface_tools.pyx":167 * return sprite_ * except ValueError: * raise ValueError('\nArgument factor_ must be a list or tuple got %s ' % type(factor_)) # <<<<<<<<<<<<<< * * cdef: */ __pyx_t_11 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__must_be_a_list, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 167, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyTuple_New(1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 167, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_12, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 167, __pyx_L15_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(0, 167, __pyx_L15_except_error) } goto __pyx_L15_except_error; __pyx_L15_except_error:; /* "Surface_tools.pyx":162 * # TUPLE * else: * try: # <<<<<<<<<<<<<< * t_factor_ = tuple(factor_) * if (<float>t_factor_[0] == 0.0 and <float>t_factor_[1] == 0.0): */ __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_5, __pyx_t_4); goto __pyx_L1_error; __pyx_L17_try_return:; __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_5, __pyx_t_4); goto __pyx_L0; __pyx_L18_try_end:; } } __pyx_L3:; /* "Surface_tools.pyx":170 * * cdef: * int i = 0 # <<<<<<<<<<<<<< * int w, h * int c1, c2 */ __pyx_v_i = 0; /* "Surface_tools.pyx":173 * int w, h * int c1, c2 * sprite_copy = sprite_.copy() # <<<<<<<<<<<<<< * * if PyObject_IsInstance(factor_, (float, int)): */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_sprite_, __pyx_n_s_copy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_10) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 173, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 173, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_sprite_copy = __pyx_t_1; __pyx_t_1 = 0; /* "Surface_tools.pyx":175 * sprite_copy = sprite_.copy() * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * if PyObject_IsInstance(sprite_, list): * c1 = <int>(sprite_[i].get_width() * factor_) */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 175, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_3 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_1); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 175, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = (__pyx_t_3 != 0); 
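/* EDITOR'S NOTE: the branch compiled below implements
 * "Surface_tools.pyx":175-181. For a scalar factor_ the target size (c1, c2)
 * is computed once from the first frame (i is still 0 here) when sprite_ is a
 * list, so every frame of an animation is rescaled to the same size. Note
 * also that the pyx-declared locals w, h are unused; Cython has optimized
 * them away (no __pyx_v_w / __pyx_v_h appear among the C locals). Roughly
 * equivalent Python, assuming pygame-style surfaces with get_width() and
 * get_height() (an editor's paraphrase of the quoted pyx, not new behavior):
 *
 *     ref = sprite_[0] if isinstance(sprite_, list) else sprite_
 *     c1 = int(ref.get_width()  * factor_)
 *     c2 = int(ref.get_height() * factor_)
 *
 * Similarly, the tuple branch above returns sprite_ unchanged when factor_
 * equals (0.0, 0.0), apparently treating a zero size as "leave as is".
 */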
if (__pyx_t_2) { /* "Surface_tools.pyx":176 * * if PyObject_IsInstance(factor_, (float, int)): * if PyObject_IsInstance(sprite_, list): # <<<<<<<<<<<<<< * c1 = <int>(sprite_[i].get_width() * factor_) * c2 = <int>(sprite_[i].get_height() * factor_) */ __pyx_t_2 = PyObject_IsInstance(__pyx_v_sprite_, ((PyObject *)(&PyList_Type))); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 176, __pyx_L1_error) __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":177 * if PyObject_IsInstance(factor_, (float, int)): * if PyObject_IsInstance(sprite_, list): * c1 = <int>(sprite_[i].get_width() * factor_) # <<<<<<<<<<<<<< * c2 = <int>(sprite_[i].get_height() * factor_) * else: */ __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_sprite_, __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_get_width); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_10))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); } } if (__pyx_t_9) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyNumber_Multiply(__pyx_t_1, __pyx_v_factor_); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_10); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 177, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_c1 = ((int)__pyx_t_8); /* "Surface_tools.pyx":178 * if PyObject_IsInstance(sprite_, list): * c1 = <int>(sprite_[i].get_width() * factor_) * c2 = <int>(sprite_[i].get_height() * factor_) # <<<<<<<<<<<<<< * else: * c1 = <int>(sprite_.get_width() * factor_) */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_sprite_, __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_get_height); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_1) { __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_10 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 178, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyNumber_Multiply(__pyx_t_10, __pyx_v_factor_); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 178, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_9); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 178, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_c2 = ((int)__pyx_t_8); /* "Surface_tools.pyx":176 * * if PyObject_IsInstance(factor_, (float, int)): * if PyObject_IsInstance(sprite_, list): # <<<<<<<<<<<<<< * c1 = <int>(sprite_[i].get_width() * factor_) * c2 = <int>(sprite_[i].get_height() * factor_) */ goto __pyx_L25; } /* "Surface_tools.pyx":180 * c2 = <int>(sprite_[i].get_height() * factor_) * else: * c1 = <int>(sprite_.get_width() * factor_) # <<<<<<<<<<<<<< * c2 = <int>(sprite_.get_height() * factor_) * */ /*else*/ { __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_sprite_, __pyx_n_s_get_width); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_10))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); } } if (__pyx_t_1) { __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 180, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 180, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyNumber_Multiply(__pyx_t_9, __pyx_v_factor_); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_10); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 180, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_c1 = ((int)__pyx_t_8); /* "Surface_tools.pyx":181 * else: * c1 = <int>(sprite_.get_width() * factor_) * c2 = <int>(sprite_.get_height() * factor_) # <<<<<<<<<<<<<< * * # ANIMATION */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_sprite_, __pyx_n_s_get_height); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_1) { __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_10 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 181, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyNumber_Multiply(__pyx_t_10, __pyx_v_factor_); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_9); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_c2 = ((int)__pyx_t_8); } __pyx_L25:; /* "Surface_tools.pyx":175 * sprite_copy = sprite_.copy() * * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * if PyObject_IsInstance(sprite_, list): * c1 = 
<int>(sprite_[i].get_width() * factor_) */ } /* "Surface_tools.pyx":184 * * # ANIMATION * if PyObject_IsInstance(sprite_copy, list): # <<<<<<<<<<<<<< * * for surface in sprite_copy: */ __pyx_t_3 = PyObject_IsInstance(__pyx_v_sprite_copy, ((PyObject *)(&PyList_Type))); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 184, __pyx_L1_error) __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":186 * if PyObject_IsInstance(sprite_copy, list): * * for surface in sprite_copy: # <<<<<<<<<<<<<< * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) */ if (likely(PyList_CheckExact(__pyx_v_sprite_copy)) || PyTuple_CheckExact(__pyx_v_sprite_copy)) { __pyx_t_9 = __pyx_v_sprite_copy; __Pyx_INCREF(__pyx_t_9); __pyx_t_13 = 0; __pyx_t_14 = NULL; } else { __pyx_t_13 = -1; __pyx_t_9 = PyObject_GetIter(__pyx_v_sprite_copy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_14 = Py_TYPE(__pyx_t_9)->tp_iternext; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 186, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_14)) { if (likely(PyList_CheckExact(__pyx_t_9))) { if (__pyx_t_13 >= PyList_GET_SIZE(__pyx_t_9)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_10 = PyList_GET_ITEM(__pyx_t_9, __pyx_t_13); __Pyx_INCREF(__pyx_t_10); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 186, __pyx_L1_error) #else __pyx_t_10 = PySequence_ITEM(__pyx_t_9, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); #endif } else { if (__pyx_t_13 >= PyTuple_GET_SIZE(__pyx_t_9)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_10 = PyTuple_GET_ITEM(__pyx_t_9, __pyx_t_13); __Pyx_INCREF(__pyx_t_10); __pyx_t_13++; if (unlikely(0 < 0)) __PYX_ERR(0, 186, __pyx_L1_error) #else __pyx_t_10 = PySequence_ITEM(__pyx_t_9, __pyx_t_13); __pyx_t_13++; if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); #endif } } else { __pyx_t_10 = __pyx_t_14(__pyx_t_9); if (unlikely(!__pyx_t_10)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 186, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_10); } __Pyx_XDECREF_SET(__pyx_v_surface, __pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":187 * * for surface in sprite_copy: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_10, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_10, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_2 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_10); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 187, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":188 * for surface in sprite_copy: * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) # <<<<<<<<<<<<<< * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) 
*/ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_11 = __Pyx_PyInt_From_int(__pyx_v_c1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_c2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_15 = PyTuple_New(2); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_15, 0, __pyx_t_11); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_15, 1, __pyx_t_12); __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_12 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_12 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_12)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_12); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_12, __pyx_v_surface, __pyx_t_15}; __pyx_t_10 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_12, __pyx_v_surface, __pyx_t_15}; __pyx_t_10 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } else #endif { __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_12) { __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_12); __pyx_t_12 = NULL; } __Pyx_INCREF(__pyx_v_surface); __Pyx_GIVEREF(__pyx_v_surface); PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_8, __pyx_v_surface); __Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_8, __pyx_t_15); __pyx_t_15 = 0; __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_11, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(__Pyx_SetItemInt(__pyx_v_sprite_copy, __pyx_v_i, __pyx_t_10, int, 1, __Pyx_PyInt_From_int, 0, 0, 0) < 0)) __PYX_ERR(0, 188, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":187 * * for surface in sprite_copy: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ goto __pyx_L29; } /* "Surface_tools.pyx":189 * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: */ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(((PyObject *)(&PyTuple_Type))); __Pyx_GIVEREF(((PyObject *)(&PyTuple_Type))); PyTuple_SET_ITEM(__pyx_t_10, 0, ((PyObject *)(&PyTuple_Type))); 
__Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_10, 1, ((PyObject *)(&PyList_Type))); __pyx_t_3 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_10); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 189, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":190 * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) # <<<<<<<<<<<<<< * else: * raise ValueError('\nArgument factor_ incorrect ' */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_11 = __Pyx_GetItemInt(__pyx_v_factor_, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_15 = __Pyx_GetItemInt(__pyx_v_factor_, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_11); __Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_15); __pyx_t_11 = 0; __pyx_t_15 = 0; __pyx_t_15 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_15)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_15); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_15, __pyx_v_surface, __pyx_t_12}; __pyx_t_10 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_15, __pyx_v_surface, __pyx_t_12}; __pyx_t_10 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif { __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_15) { __Pyx_GIVEREF(__pyx_t_15); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_15); __pyx_t_15 = NULL; } __Pyx_INCREF(__pyx_v_surface); __Pyx_GIVEREF(__pyx_v_surface); PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_8, __pyx_v_surface); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_8, __pyx_t_12); __pyx_t_12 = 0; __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_11, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(__Pyx_SetItemInt(__pyx_v_sprite_copy, __pyx_v_i, __pyx_t_10, int, 1, __Pyx_PyInt_From_int, 0, 0, 0) < 0)) __PYX_ERR(0, 190, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "Surface_tools.pyx":189 * if PyObject_IsInstance(factor_, (float, 
int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: */ goto __pyx_L29; } /* "Surface_tools.pyx":192 * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: * raise ValueError('\nArgument factor_ incorrect ' # <<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * i += 1 */ /*else*/ { /* "Surface_tools.pyx":193 * else: * raise ValueError('\nArgument factor_ incorrect ' * 'type must be float, int or tuple got %s ' % type(factor_)) # <<<<<<<<<<<<<< * i += 1 * */ __pyx_t_10 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__incorrect_type, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); /* "Surface_tools.pyx":192 * sprite_copy[i] = smoothscale(surface, (factor_[0], factor_[1])) * else: * raise ValueError('\nArgument factor_ incorrect ' # <<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * i += 1 */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_1, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(0, 192, __pyx_L1_error) } __pyx_L29:; /* "Surface_tools.pyx":194 * raise ValueError('\nArgument factor_ incorrect ' * 'type must be float, int or tuple got %s ' % type(factor_)) * i += 1 # <<<<<<<<<<<<<< * * # SINGLE IMAGE */ __pyx_v_i = (__pyx_v_i + 1); /* "Surface_tools.pyx":186 * if PyObject_IsInstance(sprite_copy, list): * * for surface in sprite_copy: # <<<<<<<<<<<<<< * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy[i] = smoothscale(surface, (c1, c2)) */ } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":184 * * # ANIMATION * if PyObject_IsInstance(sprite_copy, list): # <<<<<<<<<<<<<< * * for surface in sprite_copy: */ goto __pyx_L26; } /* "Surface_tools.pyx":198 * # SINGLE IMAGE * else: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ /*else*/ { __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(((PyObject *)(&PyFloat_Type))); __Pyx_GIVEREF(((PyObject *)(&PyFloat_Type))); PyTuple_SET_ITEM(__pyx_t_9, 0, ((PyObject *)(&PyFloat_Type))); __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1, ((PyObject *)(&PyInt_Type))); __pyx_t_2 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_9); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 198, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "Surface_tools.pyx":199 * else: * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy = smoothscale(sprite_copy,(c1, c2)) # <<<<<<<<<<<<<< * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) */ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 199, __pyx_L1_error) 
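/* EDITOR'S NOTE: each smoothscale(...) call site in this function is expanded
 * into three alternative dispatch paths: __Pyx_PyFunction_FastCall for
 * pure-Python functions (CYTHON_FAST_PYCALL), __Pyx_PyCFunction_FastCall for
 * C functions supporting the FASTCALL convention (CYTHON_FAST_PYCCALL), and a
 * generic __Pyx_PyObject_Call on a freshly built argument tuple as the
 * fallback. The __pyx_temp[] array together with the `+1-__pyx_t_8` offset is
 * how Cython prepends self when the callable turns out to be a bound method
 * (__pyx_t_8 becomes 1). All three paths perform the same call written in the
 * pyx source, here:
 *
 *     sprite_copy = smoothscale(sprite_copy, (c1, c2))
 */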
__Pyx_GOTREF(__pyx_t_10); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_c1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_11 = __Pyx_PyInt_From_int(__pyx_v_c2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_11); __pyx_t_1 = 0; __pyx_t_11 = 0; __pyx_t_11 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_10))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_v_sprite_copy, __pyx_t_12}; __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[3] = {__pyx_t_11, __pyx_v_sprite_copy, __pyx_t_12}; __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif { __pyx_t_1 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (__pyx_t_11) { __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_11); __pyx_t_11 = NULL; } __Pyx_INCREF(__pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_v_sprite_copy); PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_8, __pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_8, __pyx_t_12); __pyx_t_12 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_1, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_sprite_copy, __pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":198 * # SINGLE IMAGE * else: * if PyObject_IsInstance(factor_, (float, int)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): */ goto __pyx_L30; } /* "Surface_tools.pyx":200 * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: */ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(((PyObject *)(&PyTuple_Type))); __Pyx_GIVEREF(((PyObject *)(&PyTuple_Type))); PyTuple_SET_ITEM(__pyx_t_9, 0, ((PyObject *)(&PyTuple_Type))); __Pyx_INCREF(((PyObject *)(&PyList_Type))); __Pyx_GIVEREF(((PyObject *)(&PyList_Type))); PyTuple_SET_ITEM(__pyx_t_9, 1, ((PyObject *)(&PyList_Type))); __pyx_t_3 = PyObject_IsInstance(__pyx_v_factor_, __pyx_t_9); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 200, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_2 = (__pyx_t_3 != 0); if (__pyx_t_2) { /* "Surface_tools.pyx":201 * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) # <<<<<<<<<<<<<< * else: * raise ValueError('\nArgument factor_ incorrect ' */ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_smoothscale); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_factor_, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_12 = __Pyx_GetItemInt(__pyx_v_factor_, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_11 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_10))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[4] = {__pyx_t_11, __pyx_v_sprite_copy, __pyx_t_1, __pyx_t_12}; __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[4] = {__pyx_t_11, __pyx_v_sprite_copy, __pyx_t_1, __pyx_t_12}; __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 3+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } else #endif { __pyx_t_15 = PyTuple_New(3+__pyx_t_8); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); if (__pyx_t_11) { __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_15, 0, __pyx_t_11); __pyx_t_11 = NULL; } __Pyx_INCREF(__pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_v_sprite_copy); PyTuple_SET_ITEM(__pyx_t_15, 0+__pyx_t_8, __pyx_v_sprite_copy); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_15, 1+__pyx_t_8, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_15, 2+__pyx_t_8, __pyx_t_12); __pyx_t_1 = 0; __pyx_t_12 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_15, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_sprite_copy, __pyx_t_9); __pyx_t_9 = 0; /* "Surface_tools.pyx":200 * if PyObject_IsInstance(factor_, (float, int)): * sprite_copy = smoothscale(sprite_copy,(c1, c2)) * elif PyObject_IsInstance(factor_, (tuple, list)): # <<<<<<<<<<<<<< * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: */ goto __pyx_L30; } /* "Surface_tools.pyx":203 * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: * raise ValueError('\nArgument factor_ incorrect ' # <<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * */ /*else*/ { /* 
"Surface_tools.pyx":204 * else: * raise ValueError('\nArgument factor_ incorrect ' * 'type must be float, int or tuple got %s ' % type(factor_)) # <<<<<<<<<<<<<< * * return sprite_copy */ __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Argument_factor__incorrect_type, ((PyObject *)Py_TYPE(__pyx_v_factor_))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 204, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); /* "Surface_tools.pyx":203 * sprite_copy = smoothscale(sprite_copy,factor_[0], factor_[1]) * else: * raise ValueError('\nArgument factor_ incorrect ' # <<<<<<<<<<<<<< * 'type must be float, int or tuple got %s ' % type(factor_)) * */ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(0, 203, __pyx_L1_error) } __pyx_L30:; } __pyx_L26:; /* "Surface_tools.pyx":206 * 'type must be float, int or tuple got %s ' % type(factor_)) * * return sprite_copy # <<<<<<<<<<<<<< * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_sprite_copy); __pyx_r = __pyx_v_sprite_copy; goto __pyx_L0; /* "Surface_tools.pyx":139 * @cython.nonecheck(False) * @cython.cdivision(True) * cpdef reshape(sprite_, factor_=1.0): # <<<<<<<<<<<<<< * """ * RESHAPE ANIMATION OR IMAGE */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_12); __Pyx_XDECREF(__pyx_t_15); __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_t_factor_); __Pyx_XDECREF(__pyx_v_sprite_copy); __Pyx_XDECREF(__pyx_v_surface); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static PyObject *__pyx_pw_13Surface_tools_3reshape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_13Surface_tools_2reshape[] = "\n RESHAPE ANIMATION OR IMAGE \n \n :param sprite_: list, image; list containing the surface to rescale\n :param factor_: float, int or tuple; Represent the scale factor (new size)\n :return: return animation or a single image (rescale) \n "; static PyObject *__pyx_pw_13Surface_tools_3reshape(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_sprite_ = 0; PyObject *__pyx_v_factor_ = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("reshape (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sprite,&__pyx_n_s_factor,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject *)__pyx_float_1_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sprite)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_factor); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "reshape") < 0)) __PYX_ERR(0, 139, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_sprite_ = values[0]; __pyx_v_factor_ = values[1]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("reshape", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 139, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_13Surface_tools_2reshape(__pyx_self, __pyx_v_sprite_, __pyx_v_factor_); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_13Surface_tools_2reshape(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_sprite_, PyObject *__pyx_v_factor_) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; struct __pyx_opt_args_13Surface_tools_reshape __pyx_t_2; __Pyx_RefNannySetupContext("reshape", 0); __Pyx_XDECREF(__pyx_r); __pyx_t_2.__pyx_n = 1; __pyx_t_2.factor_ = __pyx_v_factor_; __pyx_t_1 = __pyx_f_13Surface_tools_reshape(__pyx_v_sprite_, 0, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("Surface_tools.reshape", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else 
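/* EDITOR'S NOTE: the View.MemoryView array.__cinit__ compiled below makes a
 * single PyObject_Malloc of sizeof(Py_ssize_t) * ndim * 2 and serves both
 * arrays out of it (self._strides = self._shape + self.ndim), and, for object
 * ('O') dtype, it seeds every slot of the newly malloc'ed buffer with an
 * increfed Py_None so that later reference-counted stores always have a valid
 * object to DECREF. Condensed from the quoted pyx source (not new code):
 *
 *     self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t) * self.ndim * 2)
 *     self._strides = self._shape + self.ndim
 *     ...
 *     if self.dtype_is_object:
 *         p = <PyObject **> self.data
 *         for i in range(self.len / itemsize):
 *             p[i] = Py_None
 *             Py_INCREF(Py_None)
 */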
goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 120, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error) } else { /* "View.MemoryView":121 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char 
__pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":127 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 127, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 127, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":128 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 131, __pyx_L1_error) /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 134, __pyx_L1_error) /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 
0; /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":138 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error) __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":139 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 139, __pyx_L1_error) } __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 139, __pyx_L1_error) __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":142 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":143 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 146, __pyx_L1_error) /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":151 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(1, 151, __pyx_L1_error) /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":152 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":156 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":157 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":159 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":160 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":162 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 162, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":164 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":167 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":168 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # 
<<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 174, __pyx_L1_error) /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":177 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":178 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 178, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 178, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":179 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":180 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":169 * self.free_data = 
allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":184 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":186 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 
187, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 190, __pyx_L1_error) /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":191 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":192 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":193 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":194 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":195 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":196 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":197 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":198 
* info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":201 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":203 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":205 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto 
__pyx_L3; } /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":214 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":216 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":217 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":225 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":226 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":228 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":229 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * 
def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":228 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":231 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":232 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":231 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":234 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":235 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":234 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":237 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":238 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 238, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":237 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
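/* Editor's note (illustrative sketch, not emitted by Cython; kept in a
 * comment so it does not alter this generated translation unit): the
 * __reduce_cython__ method below unconditionally raises TypeError,
 * because a type with a non-trivial __cinit__ cannot be reconstructed
 * by pickle's default protocol. A hand-written CPython method with the
 * same effect, using a hypothetical name `array_reduce`, would be:
 *
 *     static PyObject *array_reduce(PyObject *self, PyObject *unused)
 *     {
 *         PyErr_SetString(PyExc_TypeError,
 *             "no default __reduce__ due to non-trivial __cinit__");
 *         return NULL;  // NULL propagates the raised exception
 *     }
 */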
__Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":242 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, 
char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":246 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":247 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":246 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":250 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 250, __pyx_L1_error) 
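/* Editor's note (illustrative sketch, not part of Cython's output): in
 * this else-branch array_cwrapper builds the kwargs dict
 * {"allocate_buffer": False}, constructs the array around the caller's
 * existing buffer, and afterwards points result.data at that buffer, so
 * the array neither allocates nor frees that memory. Calling a Python
 * type with a keyword argument from C follows the same pattern; a
 * condensed, hypothetical version with error handling omitted:
 *
 *     PyObject *kwargs = PyDict_New();
 *     PyDict_SetItemString(kwargs, "allocate_buffer", Py_False);
 *     PyObject *result = PyObject_Call(array_type, args, kwargs);
 *     Py_DECREF(kwargs);
 */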
__Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 250, __pyx_L1_error) /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":253 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":242 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":279 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 279, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 279, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct 
__pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":280 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":279 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":282 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":281 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { int __pyx_v_use_setstate; PyObject *__pyx_v_state = NULL; PyObject *__pyx_v__dict = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":3 * def __reduce_cython__(self): * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ 
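/* Editor's note (illustrative, not generated code): what follows builds
 * the pickle "state" tuple (self.name,) and returns either
 *     (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state)
 * or  (__pyx_unpickle_Enum, (type(self), 0xb068931, state)),
 * the two shapes accepted by the __reduce__ protocol; 0xb068931
 * (decimal 184977713, the __pyx_int_184977713 constant used below) is
 * Cython's checksum of the Enum type layout, re-verified by
 * __pyx_unpickle_Enum when the object is loaded. Packing such a
 * one-element state tuple by hand would be, roughly (hypothetical,
 * error handling omitted):
 *
 *     PyObject *state = PyTuple_Pack(1, self->name);  // new reference
 */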
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":4 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":6 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":7 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":9 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":11 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":13 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * 
def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 15, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":296 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":298 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":302 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":304 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":305 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":304 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":307 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":296 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":343 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; 
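/* Editor's note (illustrative, not generated code): the block below is
 * Cython's unrolled argument parser for
 *     __cinit__(self, obj, flags, dtype_is_object=False).
 * A hand-written CPython wrapper would typically delegate to a single
 * call such as (hypothetical, simplified):
 *
 *     static char *kwlist[] = {"obj", "flags", "dtype_is_object", NULL};
 *     if (!PyArg_ParseTupleAndKeywords(args, kwds, "Oi|p", kwlist,
 *                                      &obj, &flags, &dtype_is_object))
 *         return -1;
 *
 * Cython instead inlines the equivalent switch over positional and
 * keyword arguments for speed and for exact error messages.
 */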
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 343, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 343, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 343, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 343, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 343, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":344 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":345 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if 
type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":346 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":347 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 347, __pyx_L1_error) /* "View.MemoryView":348 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":349 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":350 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":348 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":346 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":353 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":354 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":355 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":353 * * global 
__pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":356 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":357 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":358 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 359, __pyx_L1_error) /* "View.MemoryView":358 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":356 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":361 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":362 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":361 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":364 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":366 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":368 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * 
self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":343 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":370 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyThread_type_lock __pyx_t_5; PyThread_type_lock __pyx_t_6; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":371 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":372 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":371 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":376 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":378 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":379 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used 
- 1); /* "View.MemoryView":380 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":382 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":381 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; /* "View.MemoryView":380 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":383 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":378 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":385 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":376 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":370 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":387 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; 
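/*
 * [Editor's note] get_item_pointer() resolves a multi-dimensional index to
 * the raw address of a single element: it walks the index object dimension
 * by dimension and folds each integral index into the running char* via
 * __pyx_pybuffer_index(). For an ordinary strided buffer the net effect is
 * the textbook address computation sketched below; this is an illustrative
 * sketch only (the `view`/`indices` names are hypothetical, not from this
 * file), and the real __pyx_pybuffer_index() additionally bounds-checks
 * each index and follows view->suboffsets for indirect (PIL-style) buffers:
 *
 *     char *p = (char *)view->buf;
 *     for (int dim = 0; dim < view->ndim; dim++) {
 *         Py_ssize_t idx = indices[dim];         // index chosen by the caller
 *         if (idx < 0) idx += view->shape[dim];  // wrap negative indices
 *         p += idx * view->strides[dim];         // advance by this dim's stride
 *     }
 *     // p now points at the requested element
 */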
__Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":389 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":391 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 391, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 391, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 391, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 391, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":392 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 392, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 392, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":391 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":394 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":387 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t 
dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":397 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":398 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":399 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":398 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":401 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 401, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 401, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = 
__pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":404 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 404, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":405 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":404 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":407 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 407, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":408 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":397 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":410 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; __Pyx_RefNannySetupContext("__setitem__", 0); 
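/*
 * [Editor's note] __setitem__ dispatch, mirroring the Cython source quoted
 * in the surrounding comments: _unellipsify() first expands any Ellipsis in
 * the index into explicit full slices and reports whether slices are
 * present. Slice targets are then filled either element-wise from another
 * buffer (setitem_slice_assignment, which goes through
 * memoryview_copy_contents) or by broadcasting a single scalar
 * (setitem_slice_assign_scalar); a purely integral index falls through to
 * setitem_indexed(), which packs the value into one element in place.
 */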
__Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":411 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 411, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 411, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":413 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":414 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":415 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 415, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":416 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":415 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L4; } /* "View.MemoryView":418 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 418, __pyx_L1_error) __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "View.MemoryView":413 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L3; } /* "View.MemoryView":420 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":410 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":422 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":423 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":425 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":426 * try: * 
obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 426, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":425 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":427 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 427, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":428 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":423 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":430 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":422 * self.setitem_indexed(index, value) * * cdef 
is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":432 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":436 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 436, __pyx_L1_error) /* "View.MemoryView":437 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 437, __pyx_L1_error) /* "View.MemoryView":438 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":436 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 436, __pyx_L1_error) /* "View.MemoryView":432 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = 
Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":440 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":442 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":447 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":449 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":450 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":451 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":452 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 452, __pyx_L1_error) /* "View.MemoryView":451 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":453 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":449 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":455 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":457 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * 
(<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":458 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":459 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":458 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":461 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 461, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":465 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":466 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":465 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":467 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":470 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); 
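/*
 * [Editor's note] This block is Cython's lowering of the exception path of
 * the surrounding `try: ... finally: PyMem_Free(tmp)`: the live exception
 * (plus, on Python 3, the previously active exception state) and the
 * current source position were stashed in temporaries, the finally body
 * (the PyMem_Free call just above) ran with no exception pending, and the
 * stashed state is now handed back and restored so the original error
 * propagates to __pyx_L1_error unchanged.
 */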
__Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":440 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":472 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":473 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 473, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":474 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":472 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":476 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject 
*__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":479 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":482 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":484 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); 
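/*
 * [Editor's note] This is the generic fallback of Cython's three-way call
 * optimisation for `struct.unpack(self.view.format, bytesitem)`: the
 * CYTHON_FAST_PYCALL / CYTHON_FAST_PYCCALL branches above call plain
 * Python or builtin C functions without materialising an argument tuple,
 * while this path builds the tuple explicitly. When CYTHON_UNPACK_METHODS
 * has detected a bound method, its self object was hoisted to the front of
 * the argument list (hence the +__pyx_t_8 slot offsets) so the underlying
 * function can be called directly.
 */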
__pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":488 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":489 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":488 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":490 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":485 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 485, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 485, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":486 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 486, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 486, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); 
__Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":476 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":492 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":495 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":500 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":501 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":500 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 503, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":505 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 505, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = 
PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":506 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":505 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":506 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":492 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":509 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":510 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":511 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":510 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L3; } /* 
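Note: __getbuffer__ below fills each Py_buffer field only when the consumer
   requested the corresponding flag bit (PyBUF_STRIDES, PyBUF_INDIRECT,
   PyBUF_FORMAT) and leaves it NULL otherwise, which is what PEP 3118
   consumers expect.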
"View.MemoryView":513 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":515 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":516 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":515 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":518 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":520 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":521 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":520 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":523 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":525 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":526 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":525 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":528 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":530 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":531 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":532 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":533 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":534 * info.itemsize = 
self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":535 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":509 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":541 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":542 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 542, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 542, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":543 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 543, __pyx_L1_error) /* "View.MemoryView":544 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":541 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); 
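/* Exit protocol for T.__get__: on success the new reference held in __pyx_r
   is handed off to the caller via XGIVEREF just above; on the error path,
   __pyx_r was set to NULL after the traceback was recorded. */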
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":547 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":548 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":547 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":551 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":552 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":551 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; 
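/* Error exit for shape.__get__: release whichever temporaries are still
   live, record the traceback, and fall through to the common exit with a
   NULL result. */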
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":555 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":556 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":558 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 558, __pyx_L1_error) /* "View.MemoryView":556 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":560 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":555 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":565 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__15, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":564 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":567 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); 
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":570 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":571 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 571, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":570 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":574 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":575 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":574 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":578 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* 
Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":579 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":578 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":584 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":586 * result = 1 * * for length in 
self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 586, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":587 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":589 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":583 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":591 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":593 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":594 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":595 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":594 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":597 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":593 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; 
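/* __len__ reports the extent of the leading dimension and returns 0 for a
   zero-dimensional view instead of raising, so len() is always defined on a
   memoryview. */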
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":599 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":600 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":601 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":600 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":599 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
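Note: __repr__ above embeds id(self), so two distinct views over the same
   base render differently; __str__ below omits the address and reports only
   the class name of the base object.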
"View.MemoryView":603 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":604 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":603 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":607 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* 
"View.MemoryView":610 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":611 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":607 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":613 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":616 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":617 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":613 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * return 
slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":621 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":623 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":624 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 624, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct 
__pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":633 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":636 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no 
default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":645 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 
= NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":646 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":647 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":648 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":645 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":651 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":652 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":651 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":654 * return isinstance(o, memoryview) * * cdef tuple 
_unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":659 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":660 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 660, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":659 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":662 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":664 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 664, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":665 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":666 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":667 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 667, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 667, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= 
PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 667, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 667, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":668 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":669 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":670 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 670, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 
0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__18); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":671 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":669 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":673 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__19); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 673, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":674 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":668 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":676 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":677 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(1, 677, __pyx_L1_error) /* "View.MemoryView":676 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":679 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if 
(!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":680 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 680, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":667 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":682 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":683 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":684 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__20); __Pyx_GIVEREF(__pyx_slice__20); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__20); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":683 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":686 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":654 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit 
code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":688 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":689 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":690 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":691 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 691, __pyx_L1_error) /* "View.MemoryView":690 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":688 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":698 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; 
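/* Illustrative note (added for clarity; an assumption about intent, not part of the
 * Cython-generated output): memview_slice consumes the normalized index tuple produced
 * by _unellipsify() above and implements Python-level memoryview slicing. For a 2-D
 * memoryview mv:
 *   mv[1, ::2]  - an integer index drops a dimension (the is_slice == 0 path below),
 *   mv[None, :] - None inserts a length-1 dimension (shape 1, stride 0, suboffset -1),
 *   mv[2:8:3]   - slice start/stop/step are range-checked and normalized by
 *                 slice_memviewslice(), which also folds start * stride into dst.data
 *                 (or into dst.suboffsets for indirect dimensions). */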
int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":699 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":706 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":710 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 710, __pyx_L1_error) } } #endif /* "View.MemoryView":712 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":713 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 713, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":714 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":712 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":716 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":717 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":723 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":724 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":729 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = 
&suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":730 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":734 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 734, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 734, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 734, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 734, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":735 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":739 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 739, __pyx_L1_error) /* "View.MemoryView":736 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 736, 
__pyx_L1_error) /* "View.MemoryView":735 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":742 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":743 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":744 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":745 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":746 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":742 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":748 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":749 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 749, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 749, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 749, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":750 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 750, __pyx_L1_error) if 
(!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 750, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":752 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":753 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 753, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":754 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 754, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":756 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 756, __pyx_L1_error) /* "View.MemoryView":762 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":734 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":764 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":765 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":766 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if 
(unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 766, __pyx_L1_error) } /* "View.MemoryView":767 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 767, __pyx_L1_error) } /* "View.MemoryView":765 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 765, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":764 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":770 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":771 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 770, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":770 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 770, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":698 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":795 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int 
__pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":815 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":817 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":818 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":817 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":819 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":820 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 820, __pyx_L1_error) /* "View.MemoryView":819 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":815 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":823 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":825 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":826 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 826, __pyx_L1_error) /* "View.MemoryView":825 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":829 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":830 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":831 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = 
(__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":832 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":833 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":832 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":830 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":834 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":835 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":836 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":835 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":838 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":834 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":829 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":840 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":841 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":840 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":843 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":845 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":846 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":848 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":848 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":846 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":850 * if stop < 0: * stop = 0 * elif stop > shape: # 
<<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":851 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":850 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":845 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":853 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":853 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":856 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":858 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":858 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":863 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":865 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":865 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":868 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":869 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":868 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":872 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":873 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":874 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":877 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * 
if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":877 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":880 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":882 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":883 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":884 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":885 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":884 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":887 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":888 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 887, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":883 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":890 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":882 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":892 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":795 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", 
__pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":898 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":900 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":901 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":904 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":905 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 905, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 905, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":906 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":904 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":908 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":909 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":910 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":911 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":910 * shape = view.shape[dim] * stride = 
view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":913 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":914 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":915 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":916 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 916, __pyx_L1_error) /* "View.MemoryView":915 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":913 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":918 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":919 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 919, __pyx_L1_error) /* "View.MemoryView":918 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index 
>= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":921 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":922 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":922 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":925 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":898 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":931 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; /* "View.MemoryView":932 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":934 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":935 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":939 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":940 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":941 * for i in range(ndim 
/ 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":942 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":944 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":945 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(1, 945, __pyx_L1_error) /* "View.MemoryView":944 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":947 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":931 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":964 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":965 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef 
convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":964 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":967 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":968 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":969 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":968 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":971 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 971, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":967 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":973 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":974 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":975 * cdef 
assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 975, __pyx_L1_error) /* "View.MemoryView":974 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":977 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":973 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":980 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":981 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":980 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
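/* Note (annotation, not emitted by Cython): _memoryviewslice has a
   non-trivial __cinit__, so the generated __reduce_cython__ and
   __setstate_cython__ stubs below unconditionally raise TypeError;
   memoryview slices are therefore not picklable. */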
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":987 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":995 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":996 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":995 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1001 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1003 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1004 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1006 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); 
__Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1007 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1009 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1010 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1011 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1012 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1013 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":1015 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1017 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1018 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1021 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1022 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1023 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1024 * for suboffset in result.from_slice.suboffsets[:ndim]: * if 
suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1025 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":1023 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":1027 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1028 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1029 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1031 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1032 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1034 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":987 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
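*/

/* Worked example (illustrative annotation, not generated code): for a
   C-contiguous 3x4 slice of doubles, memoryview_fromslice leaves
   view.suboffsets NULL (no indirect dimensions) and computes
   view.len = itemsize * 3 * 4 = 8 * 12 = 96 bytes, with strides {32, 8}. */

/*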
"View.MemoryView":1037 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1040 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1041 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1041, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1042 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1040 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1044 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1045 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1037 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1052 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = 
__pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1053 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1054 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1056 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1057 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1059 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1060 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1061 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1062 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1065 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1068 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1069 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') 
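*/

/* memoryview_copy flow (annotation): slice_copy() above snapshotted the
   view's buf/shape/strides/suboffsets into a stack-allocated
   __Pyx_memviewslice; memoryview_copy_from_slice() below wraps that
   snapshot in a new Python-level memoryview object. */

/*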
*/ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1065 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1072 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1079 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1080 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1081 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1079 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1083 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1084 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1086 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1088 * 
return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1072 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1094 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1095 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1096 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1095 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1098 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1094 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1101 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
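*/

/* Illustrative stride check (annotation, not part of the generated code):
   for a C-contiguous 3x4 array of 8-byte doubles the strides are {32, 8};
   the last non-degenerate dimension has the smaller stride (8 <= 32), so
   get_best_order() returns 'C'. For the Fortran-contiguous layout the
   strides are {8, 24}, and it returns 'F'. */

/*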
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1106 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1107 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1109 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1110 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1111 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1112 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1110 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1114 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1115 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1116 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1117 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1115 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1119 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1120 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1119 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1122 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1101 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1125 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1132 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1133 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1134 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1135 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1137 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1139 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1140 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ 
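/* Fast path: both strides positive and equal to itemsize means the 1-D run
   is contiguous in both buffers, so the whole extent collapses into a single
   memcpy of itemsize * dst_extent bytes (e.g. 8 * 100 = 800 bytes for 100
   doubles) instead of one memcpy per element in the loop below. */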
memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1142 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1143 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1144 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1145 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1137 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1147 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1148 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1152 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1153 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1125 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1155 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1158 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, 
dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1155 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1165 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1167 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1168 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1170 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1173 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1182 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1183 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1184 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1185 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1182 * 
cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1187 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1188 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1189 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1191 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1173 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1194 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; /* "View.MemoryView":1205 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1206 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1208 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1209 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1210 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1210, __pyx_L1_error) /* "View.MemoryView":1209 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1213 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1214 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = 
__pyx_t_4; /* "View.MemoryView":1215 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1216 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1217 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1219 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1223 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1224 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1225 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1224 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1227 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1228 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); /* "View.MemoryView":1227 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1230 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1232 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1194 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD 
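/* copy_data_to_temp runs without the GIL; on error the GIL is acquired
   just long enough to record the traceback, then released again below
   before returning NULL to the nogil caller. */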
__Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1237 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1240 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1239 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 1239, __pyx_L1_error) /* "View.MemoryView":1237 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1243 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') 
% dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1244 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1244, __pyx_L1_error) /* "View.MemoryView":1243 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); 
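/* Error-helper pattern: _err_extents, _err_dim and _err are declared
 * "except -1 with gil", so nogil copy code can raise a genuine Python
 * exception and propagate it through a plain integer check, e.g.
 *     if (unlikely(__pyx_memoryview_err_dim(...) == ((int)-1))) goto error;
 * The XDECREFs above release any temporaries still live at the raise. */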
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1248 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1249 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1249, __pyx_L1_error) /* "View.MemoryView":1248 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) 
except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1251 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1251, __pyx_L1_error) } /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; /* "View.MemoryView":1262 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1263 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1265 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1266 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1267 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1270 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1270 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * 
broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1272 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1273 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1272 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1275 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1277 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1278 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1279 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1280 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1281 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1279 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1283 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1283, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1278 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1285 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1286 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = 
__pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1286, __pyx_L1_error) /* "View.MemoryView":1285 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1288 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1291 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1290 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1293 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == ((void *)NULL))) __PYX_ERR(1, 1293, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1294 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1288 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1296 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1299 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1299 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1301 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = 
slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1301 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1304 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1306 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1307 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1308 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1309 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1310 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1304 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1296 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1312 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1315 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1315, __pyx_L1_error) /* "View.MemoryView":1316 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1316, __pyx_L1_error) /* "View.MemoryView":1312 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1318 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* 
"View.MemoryView":1319 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1320 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1323 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1330 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1332 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1333 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1334 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1335 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1337 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = 
mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1338 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1339 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1340 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1348 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1352 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1353 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1352 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1348 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1357 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1360 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1357 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif 
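/* This "with gil" wrapper lets the nogil copying routines above fix up
 * Python reference counts: it acquires the GIL, delegates to the plain
 * recursive walker below, and releases the GIL on exit. */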
} /* "View.MemoryView":1363 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1367 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1368 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1369 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1370 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1369 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1372 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1368 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1374 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1375 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1377 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1363 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1383 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1386 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) 
*/ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1387 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1389 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1383 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1393 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1397 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1398 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1400 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1401 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1402 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1403 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1400 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1405 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1406 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ 
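/* Recursive case: each iteration fills one (ndim - 1)-dimensional
 * sub-slice by stepping shape/strides one level deeper, then advances
 * data by the outer stride; the ndim == 1 branch above is the memcpy
 * base case, so total work is itemsize times the product of all extents. */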
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1408 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1393 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) 
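/* Argument-unpacking boilerplate: __pyx_unpickle_Enum takes exactly three
 * arguments (__pyx_type, __pyx_checksum, __pyx_state), positionally or by
 * keyword; any other call shape jumps to the label below and is reported
 * as an invalid argument tuple before the traceback is attached. */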
__pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = NULL; PyObject *__pyx_v___pyx_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":3 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":4 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_5) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":5 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_6) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); 
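/* PyTuple_SET_ITEM steals a reference, so each element is handed over
 * with __Pyx_GIVEREF before being placed into the argument tuple for the
 * generic __Pyx_PyObject_Call fallback; the FastCall branches above avoid
 * building this temporary tuple. */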
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_INCREF(__pyx_v___pyx_type); __Pyx_GIVEREF(__pyx_v___pyx_type); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v___pyx_type); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_7 = (__pyx_t_1 != 0); if (__pyx_t_7) { /* "(tree fragment)":7 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 7, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":8 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; 
int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":10 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 10, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 10, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 11, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":12 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_8) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, 
__pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int 
__pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) 
PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if 
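/* Note: both generated tp_new hooks above use the same guard: a concrete
 * type allocates through t->tp_alloc, while a type flagged
 * Py_TPFLAGS_IS_ABSTRACT is routed to PyBaseObject_Type.tp_new so that
 * CPython itself raises the usual "can't instantiate abstract class"
 * TypeError. p->name starts as Py_None (with its own reference), keeping
 * tp_traverse and tp_clear safe even before __init__ runs. */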
(unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", 
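/* Note: __pyx_tp_dealloc_memoryview above uses the standard CPython
 * finalizer-safety pattern: the refcount is bumped back to a live value so
 * that code run from __dealloc__ cannot retrigger deallocation, and any
 * in-flight exception is parked around the call so __dealloc__ cannot
 * clobber it. Reduced to its skeleton (call_finalizer is a placeholder
 * name, not a generated symbol):
 *
 *     PyObject *etype, *eval, *etb;
 *     PyErr_Fetch(&etype, &eval, &etb);   // stash any pending exception
 *     ++Py_REFCNT(o);                     // keep o alive for the finalizer
 *     call_finalizer(o);
 *     --Py_REFCNT(o);
 *     PyErr_Restore(etype, eval, etb);    // put the exception back
 */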
(PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject 
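/* Note: __pyx_type___pyx_memoryview above exposes bf_getbuffer
 * (__pyx_memoryview_getbuffer) alongside len, item and subscript access, so
 * a Cython memoryview can itself be passed to memoryview() or numpy without
 * copying the underlying data. Py_TPFLAGS_HAVE_GC together with the
 * tp_traverse/tp_clear pair registers every PyObject reference it owns
 * (obj, _size, _array_interface, view.obj) with the cycle collector. */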
*__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "Surface_tools._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, 
/*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {"make_transparent", (PyCFunction)__pyx_pw_13Surface_tools_1make_transparent, METH_VARARGS|METH_KEYWORDS, __pyx_doc_13Surface_tools_make_transparent}, {"reshape", (PyCFunction)__pyx_pw_13Surface_tools_3reshape, METH_VARARGS|METH_KEYWORDS, __pyx_doc_13Surface_tools_2reshape}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_Surface_tools(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_Surface_tools}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "Surface_tools", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Argument_factor__incorrect_type, __pyx_k_Argument_factor__incorrect_type, sizeof(__pyx_k_Argument_factor__incorrect_type), 0, 0, 1, 0}, {&__pyx_kp_s_Argument_factor__must_be_a_list, __pyx_k_Argument_factor__must_be_a_list, sizeof(__pyx_k_Argument_factor__must_be_a_list), 0, 0, 1, 0}, {&__pyx_kp_s_Argument_factor__must_be_float, __pyx_k_Argument_factor__must_be_float, sizeof(__pyx_k_Argument_factor__must_be_float), 0, 0, 1, 0}, {&__pyx_kp_s_Array_shape_not_understood, __pyx_k_Array_shape_not_understood, sizeof(__pyx_k_Array_shape_not_understood), 0, 0, 1, 0}, {&__pyx_n_s_BLEND_RGB_ADD, __pyx_k_BLEND_RGB_ADD, sizeof(__pyx_k_BLEND_RGB_ADD), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_n_s_HWACCEL, __pyx_k_HWACCEL, sizeof(__pyx_k_HWACCEL), 0, 0, 1, 1}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, 
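/* Note: each __Pyx_StringTabEntry above and below carries (slot, C literal,
 * sizeof(literal), encoding, is_unicode, is_str, intern). __Pyx_InitStrings
 * walks this table once during module init and fills every PyObject** slot
 * with a single interned string object, so later lookups such as
 * PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, ...) reuse one shared object
 * instead of rebuilding the string each time. */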
__pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_surface, __pyx_k_Invalid_surface, sizeof(__pyx_k_Invalid_surface), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_kp_s_Pygame_library_is_missing_on_yo, __pyx_k_Pygame_library_is_missing_on_yo, sizeof(__pyx_k_Pygame_library_is_missing_on_yo), 0, 0, 1, 0}, {&__pyx_n_s_RGBA, __pyx_k_RGBA, sizeof(__pyx_k_RGBA), 0, 0, 1, 1}, {&__pyx_n_s_RLEACCEL, __pyx_k_RLEACCEL, sizeof(__pyx_k_RLEACCEL), 0, 0, 1, 1}, {&__pyx_n_s_Rect, __pyx_k_Rect, sizeof(__pyx_k_Rect), 0, 0, 1, 1}, {&__pyx_n_s_SRCALPHA, __pyx_k_SRCALPHA, sizeof(__pyx_k_SRCALPHA), 0, 0, 1, 1}, {&__pyx_n_s_Surface, __pyx_k_Surface, sizeof(__pyx_k_Surface), 0, 0, 1, 1}, {&__pyx_kp_s_Surface_without_per_pixel_infor, __pyx_k_Surface_without_per_pixel_infor, sizeof(__pyx_k_Surface_without_per_pixel_infor), 0, 0, 1, 0}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_Vector2, __pyx_k_Vector2, sizeof(__pyx_k_Vector2), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, {&__pyx_n_s_array3d, __pyx_k_array3d, sizeof(__pyx_k_array3d), 0, 0, 1, 1}, {&__pyx_n_s_array_alpha, __pyx_k_array_alpha, sizeof(__pyx_k_array_alpha), 0, 0, 1, 1}, {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, {&__pyx_n_s_encode, 
__pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_factor, __pyx_k_factor, sizeof(__pyx_k_factor), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_frombuffer, __pyx_k_frombuffer, sizeof(__pyx_k_frombuffer), 0, 0, 1, 1}, {&__pyx_n_s_get_height, __pyx_k_get_height, sizeof(__pyx_k_get_height), 0, 0, 1, 1}, {&__pyx_n_s_get_size, __pyx_k_get_size, sizeof(__pyx_k_get_size), 0, 0, 1, 1}, {&__pyx_n_s_get_width, __pyx_k_get_width, sizeof(__pyx_k_get_width), 0, 0, 1, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_image, __pyx_k_image, sizeof(__pyx_k_image), 0, 0, 1, 1}, {&__pyx_n_s_image_2, __pyx_k_image_2, sizeof(__pyx_k_image_2), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pixels3d, __pyx_k_pixels3d, sizeof(__pyx_k_pixels3d), 0, 0, 1, 1}, {&__pyx_n_s_pixels_alpha, __pyx_k_pixels_alpha, sizeof(__pyx_k_pixels_alpha), 0, 0, 1, 1}, {&__pyx_n_s_pygame, __pyx_k_pygame, sizeof(__pyx_k_pygame), 0, 0, 1, 1}, {&__pyx_n_s_pygame_math, __pyx_k_pygame_math, sizeof(__pyx_k_pygame_math), 0, 0, 1, 1}, {&__pyx_n_s_pygame_surfarray, __pyx_k_pygame_surfarray, sizeof(__pyx_k_pygame_surfarray), 0, 0, 1, 1}, {&__pyx_n_s_pygame_transform, __pyx_k_pygame_transform, sizeof(__pyx_k_pygame_transform), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, 
sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_rotate, __pyx_k_rotate, sizeof(__pyx_k_rotate), 0, 0, 1, 1}, {&__pyx_n_s_scale, __pyx_k_scale, sizeof(__pyx_k_scale), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_smoothscale, __pyx_k_smoothscale, sizeof(__pyx_k_smoothscale), 0, 0, 1, 1}, {&__pyx_n_s_sprite, __pyx_k_sprite, sizeof(__pyx_k_sprite), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_uint8, __pyx_k_uint8, sizeof(__pyx_k_uint8), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(0, 15, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 54, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 63, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 398, 
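/* Note: __Pyx_InitCachedBuiltins above resolves names such as ValueError,
 * range and enumerate exactly once and caches the builtin objects in module
 * globals; __Pyx_InitCachedConstants below pre-builds the constant tuples
 * and slices (for example the [:2] slice behind `shape[:2]`) so the hot
 * paths only borrow ready-made objects. __PYX_ERR(file_index, line, label)
 * records the originating .pyx/.pxd position for the traceback and jumps to
 * the enclosing function's error label. */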
__pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 601, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 820, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "Surface_tools.pyx":53 * cdef int width, height * try: * width, height = (<object> rgb_array_c).shape[:2] # <<<<<<<<<<<<<< * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') */ __pyx_slice_ = PySlice_New(Py_None, __pyx_int_2, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 53, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); __Pyx_GIVEREF(__pyx_slice_); /* "Surface_tools.pyx":55 * width, height = (<object> rgb_array_c).shape[:2] * except (ValueError, pygame.error) as e: * raise ValueError('\nArray shape not understood.') # <<<<<<<<<<<<<< * * cdef: */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Array_shape_not_understood); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 55, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "Surface_tools.pyx":99 * rgb = pixels3d(image_) * except (pygame.error, ValueError): * raise ValueError('\nInvalid surface.') # <<<<<<<<<<<<<< * * try: */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Invalid_surface); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "Surface_tools.pyx":104 * alpha = pixels_alpha(image_) * except (pygame.error, ValueError): * raise ValueError('\nSurface without per-pixel information.') # <<<<<<<<<<<<<< * * cdef int w, h */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Surface_without_per_pixel_infor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 104, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # 
<<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":486 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 486, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":558 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":565 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__15 = PyTuple_New(1); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__15, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__15); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def 
__setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "View.MemoryView":670 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__18 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); /* "View.MemoryView":673 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__19 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__19)) __PYX_ERR(1, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__19); __Pyx_GIVEREF(__pyx_slice__19); /* "View.MemoryView":684 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__20 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__20)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__20); __Pyx_GIVEREF(__pyx_slice__20); /* "View.MemoryView":691 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "Surface_tools.pyx":32 * * except ImportError: * raise ImportError("\n<Pygame> library is missing on your system." 
# <<<<<<<<<<<<<< * "\nTry: \n C:\\pip install pygame on a window command prompt.") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_Pygame_library_is_missing_on_yo); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":284 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "View.MemoryView":285 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); /* "View.MemoryView":286 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); /* "View.MemoryView":289 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); /* "View.MemoryView":290 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ __pyx_tuple__30 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); __pyx_codeobj__31 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__30, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__31)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, 
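/* Note: __Pyx_InitGlobals above interns the numeric constants the module
 * needs as permanent globals: 0, 1, 2, 4, -1, 1.0 and 184977713, which is
 * 0xb068931, the checksum tested by __pyx_unpickle_Enum. Every
 * PyInt_FromLong/PyFloat_FromDouble result is still checked, since even
 * these tiny allocations can fail; on failure the function returns -1 and
 * the module import aborts cleanly. */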
__pyx_L1_error) __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initSurface_tools(void); /*proto*/ PyMODINIT_FUNC initSurface_tools(void) #else PyMODINIT_FUNC PyInit_Surface_tools(void); /*proto*/ PyMODINIT_FUNC PyInit_Surface_tools(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int __pyx_pymod_exec_Surface_tools(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; static PyThread_type_lock __pyx_t_9[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_Surface_tools(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef 
__Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("Surface_tools", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_Surface_tools) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "Surface_tools")) { if (unlikely(PyDict_SetItemString(modules, "Surface_tools", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 277, __pyx_L1_error) 
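/* Note: the type-init sequence here follows the usual order for a Cython
 * extension type with C-level methods: fill the vtable with function
 * pointers, PyType_Ready() the PyTypeObject, store the vtable in tp_dict
 * via __Pyx_SetVtable (typically as a capsule under "__pyx_vtable__"), and
 * hook up pickling with __Pyx_setup_reduce. Schematic per-type skeleton
 * (T and my_impl are placeholders, not generated symbols):
 *
 *     __pyx_vtabptr_T = &__pyx_vtable_T;
 *     __pyx_vtable_T.method = my_impl;                  // C-method table
 *     if (PyType_Ready(&__pyx_type_T) < 0) goto bad;    // finalize type
 *     if (__Pyx_SetVtable(__pyx_type_T.tp_dict, __pyx_vtabptr_T) < 0) goto bad;
 */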
__pyx_type___pyx_MemviewEnum.tp_print = 0; if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 277, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), 0); if (unlikely(!__pyx_ptype_7cpython_4bool_bool)) __PYX_ERR(3, 8, __pyx_L1_error) __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), 0); if (unlikely(!__pyx_ptype_7cpython_7complex_complex)) __PYX_ERR(4, 15, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || 
defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "Surface_tools.pyx":4 * ###cython: boundscheck=False, wraparound=False, nonecheck=False, optimize.use_switch=True * * try: # <<<<<<<<<<<<<< * cimport cython * from cython.parallel cimport prange */ { (void)__pyx_t_1; (void)__pyx_t_2; (void)__pyx_t_3; /* mark used */ /*try:*/ { /* "Surface_tools.pyx":13 * PyDict_Values, PyDict_Keys, PyDict_Items * from cpython.list cimport PyList_Append, PyList_GetItem, PyList_Size, PyList_SetItem * from cpython.object cimport PyObject_SetAttr # <<<<<<<<<<<<<< * * except ImportError: */ } } /* "Surface_tools.pyx":19 * "\nTry: \n C:\\pip install cython on a window command prompt.") * * import numpy # <<<<<<<<<<<<<< * from numpy import empty, uint8, asarray * */ __pyx_t_4 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_4) < 0) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":20 * * import numpy * from numpy import empty, uint8, asarray # <<<<<<<<<<<<<< * * try: */ __pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_empty); __Pyx_GIVEREF(__pyx_n_s_empty); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_empty); __Pyx_INCREF(__pyx_n_s_uint8); __Pyx_GIVEREF(__pyx_n_s_uint8); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_n_s_uint8); __Pyx_INCREF(__pyx_n_s_asarray); __Pyx_GIVEREF(__pyx_n_s_asarray); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_n_s_asarray); __pyx_t_5 = __Pyx_Import(__pyx_n_s_numpy, __pyx_t_4, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_empty, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_uint8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_uint8, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_asarray, __pyx_t_4) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":22 * from numpy import empty, uint8, asarray * * try: # <<<<<<<<<<<<<< * import pygame * from pygame import Rect */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_1); /*try:*/ { /* "Surface_tools.pyx":23 * * try: * import pygame # <<<<<<<<<<<<<< * from pygame import Rect * from pygame.math import Vector2 */ __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame, 0, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pygame, __pyx_t_5) < 0) __PYX_ERR(0, 23, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":24 * try: * import pygame * from pygame import Rect # <<<<<<<<<<<<<< * from pygame.math import 
Vector2 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL */ __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_s_Rect); __Pyx_GIVEREF(__pyx_n_s_Rect); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_n_s_Rect); __pyx_t_4 = __Pyx_Import(__pyx_n_s_pygame, __pyx_t_5, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_Rect); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Rect, __pyx_t_5) < 0) __PYX_ERR(0, 24, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":25 * import pygame * from pygame import Rect * from pygame.math import Vector2 # <<<<<<<<<<<<<< * from pygame import Rect, BLEND_RGB_ADD, HWACCEL * from pygame import Surface, SRCALPHA, mask, RLEACCEL */ __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_Vector2); __Pyx_GIVEREF(__pyx_n_s_Vector2); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_Vector2); __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame_math, __pyx_t_4, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_Vector2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Vector2, __pyx_t_4) < 0) __PYX_ERR(0, 25, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":26 * from pygame import Rect * from pygame.math import Vector2 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL # <<<<<<<<<<<<<< * from pygame import Surface, SRCALPHA, mask, RLEACCEL * from pygame.transform import rotate, scale, smoothscale */ __pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_s_Rect); __Pyx_GIVEREF(__pyx_n_s_Rect); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_n_s_Rect); __Pyx_INCREF(__pyx_n_s_BLEND_RGB_ADD); __Pyx_GIVEREF(__pyx_n_s_BLEND_RGB_ADD); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_n_s_BLEND_RGB_ADD); __Pyx_INCREF(__pyx_n_s_HWACCEL); __Pyx_GIVEREF(__pyx_n_s_HWACCEL); PyList_SET_ITEM(__pyx_t_5, 2, __pyx_n_s_HWACCEL); __pyx_t_4 = __Pyx_Import(__pyx_n_s_pygame, __pyx_t_5, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_Rect); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Rect, __pyx_t_5) < 0) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_BLEND_RGB_ADD); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_BLEND_RGB_ADD, __pyx_t_5) < 0) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_HWACCEL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_HWACCEL, __pyx_t_5) < 0) __PYX_ERR(0, 26, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* 
"Surface_tools.pyx":27 * from pygame.math import Vector2 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL * from pygame import Surface, SRCALPHA, mask, RLEACCEL # <<<<<<<<<<<<<< * from pygame.transform import rotate, scale, smoothscale * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha */ __pyx_t_4 = PyList_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_Surface); __Pyx_GIVEREF(__pyx_n_s_Surface); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_Surface); __Pyx_INCREF(__pyx_n_s_SRCALPHA); __Pyx_GIVEREF(__pyx_n_s_SRCALPHA); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_n_s_SRCALPHA); __Pyx_INCREF(__pyx_n_s_mask); __Pyx_GIVEREF(__pyx_n_s_mask); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_n_s_mask); __Pyx_INCREF(__pyx_n_s_RLEACCEL); __Pyx_GIVEREF(__pyx_n_s_RLEACCEL); PyList_SET_ITEM(__pyx_t_4, 3, __pyx_n_s_RLEACCEL); __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame, __pyx_t_4, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_Surface); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_Surface, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_SRCALPHA); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_SRCALPHA, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_mask, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_RLEACCEL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_RLEACCEL, __pyx_t_4) < 0) __PYX_ERR(0, 27, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":28 * from pygame import Rect, BLEND_RGB_ADD, HWACCEL * from pygame import Surface, SRCALPHA, mask, RLEACCEL * from pygame.transform import rotate, scale, smoothscale # <<<<<<<<<<<<<< * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha * */ __pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_n_s_rotate); __Pyx_GIVEREF(__pyx_n_s_rotate); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_n_s_rotate); __Pyx_INCREF(__pyx_n_s_scale); __Pyx_GIVEREF(__pyx_n_s_scale); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_n_s_scale); __Pyx_INCREF(__pyx_n_s_smoothscale); __Pyx_GIVEREF(__pyx_n_s_smoothscale); PyList_SET_ITEM(__pyx_t_5, 2, __pyx_n_s_smoothscale); __pyx_t_4 = __Pyx_Import(__pyx_n_s_pygame_transform, __pyx_t_5, -1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_rotate); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_rotate, __pyx_t_5) < 0) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_scale); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 
28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_scale, __pyx_t_5) < 0) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_ImportFrom(__pyx_t_4, __pyx_n_s_smoothscale); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_smoothscale, __pyx_t_5) < 0) __PYX_ERR(0, 28, __pyx_L8_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "Surface_tools.pyx":29 * from pygame import Surface, SRCALPHA, mask, RLEACCEL * from pygame.transform import rotate, scale, smoothscale * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha # <<<<<<<<<<<<<< * * except ImportError: */ __pyx_t_4 = PyList_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_array3d); __Pyx_GIVEREF(__pyx_n_s_array3d); PyList_SET_ITEM(__pyx_t_4, 0, __pyx_n_s_array3d); __Pyx_INCREF(__pyx_n_s_pixels3d); __Pyx_GIVEREF(__pyx_n_s_pixels3d); PyList_SET_ITEM(__pyx_t_4, 1, __pyx_n_s_pixels3d); __Pyx_INCREF(__pyx_n_s_array_alpha); __Pyx_GIVEREF(__pyx_n_s_array_alpha); PyList_SET_ITEM(__pyx_t_4, 2, __pyx_n_s_array_alpha); __Pyx_INCREF(__pyx_n_s_pixels_alpha); __Pyx_GIVEREF(__pyx_n_s_pixels_alpha); PyList_SET_ITEM(__pyx_t_4, 3, __pyx_n_s_pixels_alpha); __pyx_t_5 = __Pyx_Import(__pyx_n_s_pygame_surfarray, __pyx_t_4, -1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_array3d); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_array3d, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_pixels3d); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pixels3d, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_array_alpha); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_array_alpha, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_ImportFrom(__pyx_t_5, __pyx_n_s_pixels_alpha); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pixels_alpha, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L8_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":22 * from numpy import empty, uint8, asarray * * try: # <<<<<<<<<<<<<< * import pygame * from pygame import Rect */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L13_try_end; __pyx_L8_error:; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /* "Surface_tools.pyx":31 * from pygame.surfarray import array3d, pixels3d, array_alpha, pixels_alpha * * except ImportError: # <<<<<<<<<<<<<< * raise ImportError("\n<Pygame> library is missing on your system." 
* "\nTry: \n C:\\pip install pygame on a window command prompt.") */ __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ImportError); if (__pyx_t_6) { __Pyx_AddTraceback("Surface_tools", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_4, &__pyx_t_7) < 0) __PYX_ERR(0, 31, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_7); /* "Surface_tools.pyx":32 * * except ImportError: * raise ImportError("\n<Pygame> library is missing on your system." # <<<<<<<<<<<<<< * "\nTry: \n C:\\pip install pygame on a window command prompt.") * */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 32, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(0, 32, __pyx_L10_except_error) } goto __pyx_L10_except_error; __pyx_L10_except_error:; /* "Surface_tools.pyx":22 * from numpy import empty, uint8, asarray * * try: # <<<<<<<<<<<<<< * import pygame * from pygame import Rect */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_1); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1); goto __pyx_L1_error; __pyx_L13_try_end:; } /* "Surface_tools.pyx":1 * # encoding: utf-8 # <<<<<<<<<<<<<< * ###cython: boundscheck=False, wraparound=False, nonecheck=False, optimize.use_switch=True * */ __pyx_t_7 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_7) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":207 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_7 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_7) < 0) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":284 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":285 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":286 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject 
*)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":289 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":290 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":314 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":315 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_9[0] = PyThread_allocate_lock(); __pyx_t_9[1] = PyThread_allocate_lock(); __pyx_t_9[2] = PyThread_allocate_lock(); __pyx_t_9[3] = PyThread_allocate_lock(); __pyx_t_9[4] = PyThread_allocate_lock(); __pyx_t_9[5] = PyThread_allocate_lock(); __pyx_t_9[6] = PyThread_allocate_lock(); __pyx_t_9[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_9, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":537 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 537, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_7) < 0) __PYX_ERR(1, 537, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":983 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_7 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_7) < 0) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ 
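/* Editorial note (added comment, not Cython compiler output): the statement
 * below registers the generated pickle helper `__pyx_unpickle_Enum` in the
 * module dict. After the module-init cleanup labels (`__pyx_L1_error`,
 * `__pyx_L0`) that follow it, the remainder of this file is Cython's generic
 * runtime-support library -- refnanny hooks, exception save/restore, fast
 * C-function and PyFunction call paths, memoryview slice acquisition
 * counting, and PyLong/PyInt arithmetic fast paths. All of it is
 * compiler-emitted boilerplate rather than hand-written code from
 * Surface_tools.pyx, so local edits here would be overwritten on the next
 * cythonize run. */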
__pyx_t_7 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_7) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init Surface_tools", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init Surface_tools"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_USE_TYPE_SLOTS PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = 
PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_USE_TYPE_SLOTS result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if PY_VERSION_HEX >= 0x030700A2 *type = tstate->exc_state.exc_type; *value = tstate->exc_state.exc_value; *tb = tstate->exc_state.exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = type; tstate->exc_state.exc_value = value; tstate->exc_state.exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if 
PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = local_type; tstate->exc_state.exc_value = local_value; tstate->exc_state.exc_traceback = local_tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, 
PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if 
(PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = f->f_localsplus; for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int 
i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_TypeCheck(func, __pyx_CyFunctionType))) 
{ #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* SetItemInt */ static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return -1; PyErr_Clear(); } } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) { #else if (is_list || PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = *type; tstate->exc_state.exc_value = *value; tstate->exc_state.exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const 
digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if 
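#if 0
/* Editor's note -- illustrative sketch, compiled out via #if 0.
 * __Pyx_div_long above turns C division, which truncates toward zero, into
 * Python floor division.  Its branchless correction subtracts one exactly
 * when the remainder is nonzero and has the opposite sign of the divisor,
 * i.e. when (r ^ b) < 0.  The explicit-branch equivalent:
 */
static long floor_div_long(long a, long b) {
    long q = a / b;       /* truncated quotient, e.g. -7 / 2 == -3 */
    long r = a - q * b;   /* remainder carries the sign of a */
    if (r != 0 && ((r < 0) != (b < 0)))
        q -= 1;           /* round toward -infinity: -7 // 2 == -4 */
    return q;
}
#endif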
(!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = 
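#if 0
/* Editor's note -- illustrative sketch, compiled out via #if 0; the name
 * lower_bound_int is ours.  The CodeObjectCache above keeps its entries
 * sorted by code_line so __pyx_find_code_object can binary-search them;
 * __pyx_insert_code_object shifts the tail to preserve the order and grows
 * the array 64 entries at a time.  The search is essentially a lower bound;
 * a generic version over plain ints:
 */
static int lower_bound_int(const int *a, int count, int key) {
    int lo = 0, hi = count;
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;   /* overflow-safe midpoint */
        if (a[mid] < key) lo = mid + 1;
        else hi = mid;
    }
    return lo;   /* first index with a[lo] >= key, or count */
}
#endif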
PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const 
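#if 0
/* Editor's note -- illustrative sketch, compiled out via #if 0.
 * __pyx_slices_overlap above reduces n-dimensional aliasing detection to a
 * one-dimensional interval test: walking each dimension's stride accumulates
 * the lowest and highest byte a slice can touch, and two half-open byte
 * ranges [start, end) intersect iff start1 < end2 && start2 < end1.  The
 * interval test in isolation (comparing pointers into unrelated objects is
 * formally unspecified in ISO C, exactly as in the helper above):
 */
static int byte_ranges_overlap(const char *start1, const char *end1,
                               const char *start2, const char *end2) {
    /* half-open: ranges that merely touch do not overlap */
    return (start1 < end2) && (start2 < end1);
}
#endif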
char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_char(unsigned char value) { const unsigned char neg_one = (unsigned char) -1, const_zero = (unsigned char) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(unsigned char) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned char) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(unsigned char) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(unsigned char), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_unsigned_char(const char *itemp) { return (PyObject *) __Pyx_PyInt_From_unsigned_char(*(unsigned char *) itemp); } static CYTHON_INLINE int __pyx_memview_set_unsigned_char(const char *itemp, PyObject *obj) { unsigned char value = __Pyx_PyInt_As_unsigned_char(obj); if ((value == (unsigned char)-1) && PyErr_Occurred()) return 0; *(unsigned char *) itemp = value; return 1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) 
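#if 0
/* Editor's note -- illustrative sketch, compiled out via #if 0; the name
 * fits_in_short is ours.  The __PYX_VERIFY_RETURN_INT machinery above checks
 * a narrowing conversion with a cast round-trip: cast down to the target
 * type, cast back, and compare.  Any value that does not survive the round
 * trip did not fit.  The same idea for one concrete pair of types:
 */
static int fits_in_short(long value) {
    return value == (long)(short)value;  /* round-trip preserved the value? */
}
#endif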
< sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * 
PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) 
- 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE unsigned char __Pyx_PyInt_As_unsigned_char(PyObject *x) { const unsigned char neg_one = (unsigned char) -1, const_zero = (unsigned char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (unsigned char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned char) 0; case 1: __PYX_VERIFY_RETURN_INT(unsigned char, digit, digits[0]) case 2: if (8 * sizeof(unsigned char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) >= 2 * PyLong_SHIFT) { return (unsigned char) (((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])); } } break; case 3: if (8 * 
sizeof(unsigned char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) >= 3 * PyLong_SHIFT) { return (unsigned char) (((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])); } } break; case 4: if (8 * sizeof(unsigned char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) >= 4 * PyLong_SHIFT) { return (unsigned char) (((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (unsigned char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(unsigned char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (unsigned char) 0; case -1: __PYX_VERIFY_RETURN_INT(unsigned char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(unsigned char, digit, +digits[0]) case -2: if (8 * sizeof(unsigned char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT) { return (unsigned char) (((unsigned char)-1)*(((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case 2: if (8 * sizeof(unsigned char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT) { return (unsigned char) ((((((unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case -3: if (8 * sizeof(unsigned char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT) { return (unsigned char) (((unsigned char)-1)*(((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case 3: if (8 * sizeof(unsigned char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned 
char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT) { return (unsigned char) ((((((((unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case -4: if (8 * sizeof(unsigned char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 4 * PyLong_SHIFT) { return (unsigned char) (((unsigned char)-1)*(((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; case 4: if (8 * sizeof(unsigned char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(unsigned char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(unsigned char) - 1 > 4 * PyLong_SHIFT) { return (unsigned char) ((((((((((unsigned char)digits[3]) << PyLong_SHIFT) | (unsigned char)digits[2]) << PyLong_SHIFT) | (unsigned char)digits[1]) << PyLong_SHIFT) | (unsigned char)digits[0]))); } } break; } #endif if (sizeof(unsigned char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(unsigned char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(unsigned char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned char) -1; } } else { unsigned char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (unsigned char) -1; val = __Pyx_PyInt_As_unsigned_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned char"); return (unsigned char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned char"); return (unsigned char) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if 
CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, 
digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, 
(((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
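#if 0
/* Editor's note -- illustrative sketch, compiled out via #if 0; the name
 * is_little_endian is ours.  __Pyx_Is_Little_Endian in the IsLittleEndian
 * section just below type-puns 0x01020304 through a union and checks whether
 * the lowest-addressed byte is 0x04 (little-endian) rather than 0x01
 * (big-endian).  The same probe with the minimal constant 1:
 */
#include <stdint.h>
static int is_little_endian(void) {
    uint32_t probe = 1;
    return *(const unsigned char *)&probe == 1;  /* LSB stored first? */
}
#endif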
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError, "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ?
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably be the same as above, but we don't have any guarantees.
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
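#if 0
/* Editor's note -- illustrative sketch, compiled out via #if 0; the macro
 * name ALIGNOF_PORTABLE is ours.  The __Pyx_st_* and __Pyx_pad_* typedefs
 * above measure alignment without C11 _Alignof: in struct { char c; T x; }
 * the compiler inserts padding after c so that x is aligned, hence
 * sizeof(struct) - sizeof(T) equals T's alignment; the x-then-c variants
 * measure the tail padding instead.  As a pre-C11 macro (valid C, though a
 * struct defined inside sizeof is not valid C++):
 */
#define ALIGNOF_PORTABLE(T) (sizeof(struct { char c; T x; }) - sizeof(T))
/* ALIGNOF_PORTABLE(double) is typically 8 on x86-64 and 4 on classic i386. */
#endif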
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
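/* End of format string reached while struct fields are still expected on the stack: report the dtype mismatch. */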
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* 
MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if 
(new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_unsigned_char(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 3, &__Pyx_TypeInfo_unsigned_char, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_unsigned_char(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 2, &__Pyx_TypeInfo_unsigned_char, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_unsigned_char(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 3, &__Pyx_TypeInfo_unsigned_char, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char 
message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) PyErr_Clear(); ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
_prism.c
/* Generated by Cython 0.20.1 on Thu Jul 17 17:54:14 2014 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #define CYTHON_ABI "0_20_1" #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY #define Py_OptimizeFlag 0 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, 
lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_VERSION_HEX < 0x02060000 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x02060000 #define Py_TPFLAGS_HAVE_VERSION_TAG 0 #endif #if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT) #define Py_TPFLAGS_IS_ABSTRACT 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \ PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. 
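For example, assuming IEEE-754 single precision (the usual representation of float), setting every byte to 0xFF yields the bit pattern 0xFFFFFFFF: sign = 1, exponent = 0xFF (all ones), mantissa = 0x7FFFFF (nonzero, with its leading bit set), i.e. a negative quiet NaN.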
*/ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__fatiando__gravmag___prism #define __PYX_HAVE_API__fatiando__gravmag___prism #include "math.h" #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "omp.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s) #define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s) #define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s) #define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s) #define 
__Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return u_end - u - 1; } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { const char* default_encoding_c = PyBytes_AS_STRING(default_encoding); char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (ascii_chars_u == NULL) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } } Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; default_encoding_c = PyBytes_AS_STRING(default_encoding); __PYX_DEFAULT_STRING_ENCODING = (char*) 
malloc(strlen(default_encoding_c) + 1); /* +1: strcpy also copies the terminating NUL */ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(sys); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); return -1; } #endif #endif #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_prism.pyx", "__init__.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723 * # in Cython to enable them only on the right systems.
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fatiando/gravmag/_prism.pyx":16 * * DTYPE = numpy.float * ctypedef numpy.float_t DTYPE_T # <<<<<<<<<<<<<< * * cdef inline double safe_atan2(double y, double x) nogil: */ typedef __pyx_t_5numpy_float_t __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_XDECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_XDECREF(tmp); \ } while (0) #define __Pyx_DECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_DECREF(tmp); \ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} 
while(0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static void __Pyx_RaiseBufferFallbackError(void); /*proto*/ #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if 
(defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static 
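/* The complex-arithmetic layer above is Cython's standard support for the
 * numpy complex typedefs pulled in by the .pxd include: when CYTHON_CCOMPLEX
 * is set it maps straight onto C99 _Complex or C++ std::complex, otherwise
 * it falls back to the struct-based helpers declared here (__Pyx_c_sum,
 * __Pyx_c_prod, ...). The prism kernels themselves work purely in real
 * doubles. */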
CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'openmp' */ /* Module declarations from 'fatiando.gravmag._prism' */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2(double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_safe_log(double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelpot(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelx(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernely(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelz(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelxx(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelxy(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelxz(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelyy(double, double, double, double); /*proto*/ static CYTHON_INLINE double 
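/* Each `cdef inline ... nogil` helper from _prism.pyx is emitted as one of
 * the static CYTHON_INLINE functions prototyped here: safe_atan2/safe_log
 * guard the singular cases of atan2 and log, kernelpot is the potential
 * kernel, kernelx/y/z are the first-derivative kernels, and
 * kernelxx..kernelzz the six independent second derivatives. Being nogil,
 * they can run inside the prange (OpenMP) loops without holding the
 * interpreter lock. */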
__pyx_f_8fatiando_7gravmag_6_prism_kernelyz(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelzz(double, double, double, double); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T = { "DTYPE_T", NULL, sizeof(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "fatiando.gravmag._prism" int __pyx_module_is_main_fatiando__gravmag___prism = 0; /* Implementation of 'fatiando.gravmag._prism' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_2bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_4by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_6bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_8gx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_10gy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_12gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_14gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double 
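/* One __pyx_pf_* implementation prototype per public function of the
 * module: tf (the total-field anomaly, taking magnetization mx/my/mz and
 * field direction fx/fy/fz), bx/by/bz, gx/gy/gz, the tensor components
 * gxx..gzz, and potential. All take the prism bounds x1..z2, the
 * observation arrays xp/yp/zp, and an output array res that is filled in
 * place. */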
__pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_16gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_18gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_20gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_22gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_24gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_26potential(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_B[] = "B"; static char __pyx_k_H[] = "H"; static char __pyx_k_I[] = "I"; static char __pyx_k_L[] = "L"; static char __pyx_k_O[] = "O"; static char __pyx_k_Q[] = "Q"; static char __pyx_k_b[] = "b"; static char __pyx_k_d[] = "d"; static char __pyx_k_f[] = "f"; static char __pyx_k_g[] = "g"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_j[] = "j"; static char __pyx_k_k[] = "k"; static char __pyx_k_l[] = "l"; static char __pyx_k_q[] = "q"; static char __pyx_k_r[] = "r"; static char __pyx_k_x[] = "x"; static char __pyx_k_y[] = "y"; static char __pyx_k_z[] = "z"; static char __pyx_k_Zd[] = "Zd"; static char __pyx_k_Zf[] = "Zf"; static char __pyx_k_Zg[] = "Zg"; static char __pyx_k_bx[] = "bx"; static char __pyx_k_by[] = "by"; static char __pyx_k_bz[] = "bz"; static char __pyx_k_dx[] = "dx"; static char __pyx_k_dy[] = "dy"; static char __pyx_k_dz[] = "dz"; static char __pyx_k_fx[] = "fx"; static char __pyx_k_fy[] = "fy"; static char __pyx_k_fz[] = "fz"; static 
char __pyx_k_gx[] = "gx"; static char __pyx_k_gy[] = "gy"; static char __pyx_k_gz[] = "gz"; static char __pyx_k_mx[] = "mx"; static char __pyx_k_my[] = "my"; static char __pyx_k_mz[] = "mz"; static char __pyx_k_tf[] = "tf"; static char __pyx_k_v1[] = "v1"; static char __pyx_k_v2[] = "v2"; static char __pyx_k_v3[] = "v3"; static char __pyx_k_v4[] = "v4"; static char __pyx_k_v5[] = "v5"; static char __pyx_k_v6[] = "v6"; static char __pyx_k_x1[] = "x1"; static char __pyx_k_x2[] = "x2"; static char __pyx_k_xp[] = "xp"; static char __pyx_k_y1[] = "y1"; static char __pyx_k_y2[] = "y2"; static char __pyx_k_yp[] = "yp"; static char __pyx_k_z1[] = "z1"; static char __pyx_k_z2[] = "z2"; static char __pyx_k_zp[] = "zp"; static char __pyx_k_gxx[] = "gxx"; static char __pyx_k_gxy[] = "gxy"; static char __pyx_k_gxz[] = "gxz"; static char __pyx_k_gyy[] = "gyy"; static char __pyx_k_gyz[] = "gyz"; static char __pyx_k_gzz[] = "gzz"; static char __pyx_k_res[] = "res"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_size[] = "size"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_tmp1[] = "tmp1"; static char __pyx_k_tmp2[] = "tmp2"; static char __pyx_k_DTYPE[] = "DTYPE"; static char __pyx_k_array[] = "array"; static char __pyx_k_dtype[] = "dtype"; static char __pyx_k_float[] = "float"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_kernel[] = "kernel"; static char __pyx_k_density[] = "density"; static char __pyx_k_potential[] = "potential"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_RuntimeError[] = "RuntimeError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer"; static char __pyx_k_fatiando_gravmag__prism[] = "fatiando.gravmag._prism"; static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static char __pyx_k_home_leo_src_fatiando_fatiando[] = "/home/leo/src/fatiando/fatiando/gravmag/_prism.pyx"; static char __pyx_k_Cython_implementation_of_the_gr[] = "\nCython implementation of the gravity and magnetic fields of right rectangular\nprisms.\n"; static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_array; static PyObject *__pyx_n_s_bx; static PyObject *__pyx_n_s_by; static PyObject *__pyx_n_s_bz; static PyObject *__pyx_n_s_density; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dx; static PyObject *__pyx_n_s_dy; static PyObject *__pyx_n_s_dz; static PyObject *__pyx_n_s_fatiando_gravmag__prism; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_fx; static PyObject *__pyx_n_s_fy; static PyObject *__pyx_n_s_fz; static PyObject *__pyx_n_s_gx; static PyObject *__pyx_n_s_gxx; static 
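/* String table: each __pyx_k_* byte array is interned once at module init
 * (via __Pyx_InitStrings) into the matching __pyx_n_s_* / __pyx_kp_* object
 * declared below, so keyword parsing and attribute lookups reuse a single
 * cached string object instead of rebuilding it on every call. */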
PyObject *__pyx_n_s_gxy; static PyObject *__pyx_n_s_gxz; static PyObject *__pyx_n_s_gy; static PyObject *__pyx_n_s_gyy; static PyObject *__pyx_n_s_gyz; static PyObject *__pyx_n_s_gz; static PyObject *__pyx_n_s_gzz; static PyObject *__pyx_kp_s_home_leo_src_fatiando_fatiando; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_kernel; static PyObject *__pyx_n_s_l; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mx; static PyObject *__pyx_n_s_my; static PyObject *__pyx_n_s_mz; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_potential; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_releasebuffer; static PyObject *__pyx_n_s_r; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_res; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tf; static PyObject *__pyx_n_s_tmp1; static PyObject *__pyx_n_s_tmp2; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_v1; static PyObject *__pyx_n_s_v2; static PyObject *__pyx_n_s_v3; static PyObject *__pyx_n_s_v4; static PyObject *__pyx_n_s_v5; static PyObject *__pyx_n_s_v6; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_x1; static PyObject *__pyx_n_s_x2; static PyObject *__pyx_n_s_xp; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_y1; static PyObject *__pyx_n_s_y2; static PyObject *__pyx_n_s_yp; static PyObject *__pyx_n_s_z; static PyObject *__pyx_n_s_z1; static PyObject *__pyx_n_s_z2; static PyObject *__pyx_n_s_zp; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__33; static PyObject *__pyx_codeobj__8; static PyObject *__pyx_codeobj__10; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; static PyObject *__pyx_codeobj__30; static PyObject *__pyx_codeobj__32; static PyObject *__pyx_codeobj__34; /* "fatiando/gravmag/_prism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double safe_atan2(double y, double x) nogil: # <<<<<<<<<<<<<< * cdef double res * if y == 0: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2(double __pyx_v_y, double __pyx_v_x) { double __pyx_v_res; double __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "fatiando/gravmag/_prism.pyx":20 * cdef inline double safe_atan2(double y, double x) nogil: * cdef double res * if y == 0: # <<<<<<<<<<<<<< * res = 0 * elif (y > 0) and (x < 0): */ __pyx_t_1 = ((__pyx_v_y == 0.0) != 0); if (__pyx_t_1) { /* 
"fatiando/gravmag/_prism.pyx":21 * cdef double res * if y == 0: * res = 0 # <<<<<<<<<<<<<< * elif (y > 0) and (x < 0): * res = atan2(y, x) - 3.1415926535897931159979634685441851615906 */ __pyx_v_res = 0.0; goto __pyx_L3; } /* "fatiando/gravmag/_prism.pyx":22 * if y == 0: * res = 0 * elif (y > 0) and (x < 0): # <<<<<<<<<<<<<< * res = atan2(y, x) - 3.1415926535897931159979634685441851615906 * elif (y < 0) and (x < 0): */ __pyx_t_1 = ((__pyx_v_y > 0.0) != 0); if (__pyx_t_1) { __pyx_t_2 = ((__pyx_v_x < 0.0) != 0); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "fatiando/gravmag/_prism.pyx":23 * res = 0 * elif (y > 0) and (x < 0): * res = atan2(y, x) - 3.1415926535897931159979634685441851615906 # <<<<<<<<<<<<<< * elif (y < 0) and (x < 0): * res = atan2(y, x) + 3.1415926535897931159979634685441851615906 */ __pyx_v_res = (atan2(__pyx_v_y, __pyx_v_x) - 3.1415926535897931159979634685441851615906); goto __pyx_L3; } /* "fatiando/gravmag/_prism.pyx":24 * elif (y > 0) and (x < 0): * res = atan2(y, x) - 3.1415926535897931159979634685441851615906 * elif (y < 0) and (x < 0): # <<<<<<<<<<<<<< * res = atan2(y, x) + 3.1415926535897931159979634685441851615906 * else: */ __pyx_t_3 = ((__pyx_v_y < 0.0) != 0); if (__pyx_t_3) { __pyx_t_1 = ((__pyx_v_x < 0.0) != 0); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "fatiando/gravmag/_prism.pyx":25 * res = atan2(y, x) - 3.1415926535897931159979634685441851615906 * elif (y < 0) and (x < 0): * res = atan2(y, x) + 3.1415926535897931159979634685441851615906 # <<<<<<<<<<<<<< * else: * res = atan2(y, x) */ __pyx_v_res = (atan2(__pyx_v_y, __pyx_v_x) + 3.1415926535897931159979634685441851615906); goto __pyx_L3; } /*else*/ { /* "fatiando/gravmag/_prism.pyx":27 * res = atan2(y, x) + 3.1415926535897931159979634685441851615906 * else: * res = atan2(y, x) # <<<<<<<<<<<<<< * return res * */ __pyx_v_res = atan2(__pyx_v_y, __pyx_v_x); } __pyx_L3:; /* "fatiando/gravmag/_prism.pyx":28 * else: * res = atan2(y, x) * return res # <<<<<<<<<<<<<< * * cdef inline double safe_log(double x) nogil: */ __pyx_r = __pyx_v_res; goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double safe_atan2(double y, double x) nogil: # <<<<<<<<<<<<<< * cdef double res * if y == 0: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":30 * return res * * cdef inline double safe_log(double x) nogil: # <<<<<<<<<<<<<< * cdef double res * if x == 0: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_safe_log(double __pyx_v_x) { double __pyx_v_res; double __pyx_r; int __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":32 * cdef inline double safe_log(double x) nogil: * cdef double res * if x == 0: # <<<<<<<<<<<<<< * res = 0 * else: */ __pyx_t_1 = ((__pyx_v_x == 0.0) != 0); if (__pyx_t_1) { /* "fatiando/gravmag/_prism.pyx":33 * cdef double res * if x == 0: * res = 0 # <<<<<<<<<<<<<< * else: * res = log(x) */ __pyx_v_res = 0.0; goto __pyx_L3; } /*else*/ { /* "fatiando/gravmag/_prism.pyx":35 * res = 0 * else: * res = log(x) # <<<<<<<<<<<<<< * return res * */ __pyx_v_res = log(__pyx_v_x); } __pyx_L3:; /* "fatiando/gravmag/_prism.pyx":36 * else: * res = log(x) * return res # <<<<<<<<<<<<<< * * cdef inline double kernelpot(double x, double y, double z, double r) nogil: */ __pyx_r = __pyx_v_res; goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":30 * return res * * cdef inline double safe_log(double x) nogil: # <<<<<<<<<<<<<< * cdef double res * if x == 0: */ /* function exit 
code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":38 * return res * * cdef inline double kernelpot(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return (x*y*safe_log(z + r) + y*z*safe_log(x + r) + x*z*safe_log(y + r) * - 0.5*x**2*safe_atan2(z*y, x*r) - 0.5*y**2*safe_atan2(z*x, y*r) */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelpot(double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":41 * return (x*y*safe_log(z + r) + y*z*safe_log(x + r) + x*z*safe_log(y + r) * - 0.5*x**2*safe_atan2(z*y, x*r) - 0.5*y**2*safe_atan2(z*x, y*r) * - 0.5*z**2*safe_atan2(x*y, z*r)) # <<<<<<<<<<<<<< * * # Minus in gravity because Nagy et al (2000) give the formula for the gradient */ __pyx_r = (((((((__pyx_v_x * __pyx_v_y) * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_z + __pyx_v_r))) + ((__pyx_v_y * __pyx_v_z) * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_x + __pyx_v_r)))) + ((__pyx_v_x * __pyx_v_z) * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_y + __pyx_v_r)))) - ((0.5 * pow(__pyx_v_x, 2.0)) * __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_z * __pyx_v_y), (__pyx_v_x * __pyx_v_r)))) - ((0.5 * pow(__pyx_v_y, 2.0)) * __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_z * __pyx_v_x), (__pyx_v_y * __pyx_v_r)))) - ((0.5 * pow(__pyx_v_z, 2.0)) * __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_x * __pyx_v_y), (__pyx_v_z * __pyx_v_r)))); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":38 * return res * * cdef inline double kernelpot(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return (x*y*safe_log(z + r) + y*z*safe_log(x + r) + x*z*safe_log(y + r) * - 0.5*x**2*safe_atan2(z*y, x*r) - 0.5*y**2*safe_atan2(z*x, y*r) */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":45 * # Minus in gravity because Nagy et al (2000) give the formula for the gradient * # of the potential. Gravity is -grad(V). * cdef inline double kernelx(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -(y*safe_log(z + r) + z*safe_log(y + r) - x*safe_atan2(z*y, x*r)) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelx(double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":46 * # of the potential. Gravity is -grad(V). * cdef inline double kernelx(double x, double y, double z, double r) nogil: * return -(y*safe_log(z + r) + z*safe_log(y + r) - x*safe_atan2(z*y, x*r)) # <<<<<<<<<<<<<< * * cdef inline double kernely(double x, double y, double z, double r) nogil: */ __pyx_r = (-(((__pyx_v_y * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_z + __pyx_v_r))) + (__pyx_v_z * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_y + __pyx_v_r)))) - (__pyx_v_x * __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_z * __pyx_v_y), (__pyx_v_x * __pyx_v_r))))); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":45 * # Minus in gravity because Nagy et al (2000) give the formula for the gradient * # of the potential. Gravity is -grad(V). 
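 *
 * In mathematical form, the kernelpot function emitted above computes
 *
 *   k(x, y, z, r) =  x y ln(z + r) + y z ln(x + r) + x z ln(y + r)
 *                  - (x^2 / 2) atan2(z y, x r)
 *                  - (y^2 / 2) atan2(z x, y r)
 *                  - (z^2 / 2) atan2(x y, z r)
 *
 * with r supplied by the caller (the distance sqrt(x^2 + y^2 + z^2) in the
 * Nagy et al. (2000) formulas). kernelx/y/z below carry the minus sign
 * noted above, so that gravity comes out as -grad(V).
 *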
* cdef inline double kernelx(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -(y*safe_log(z + r) + z*safe_log(y + r) - x*safe_atan2(z*y, x*r)) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":48 * return -(y*safe_log(z + r) + z*safe_log(y + r) - x*safe_atan2(z*y, x*r)) * * cdef inline double kernely(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -(z*safe_log(x + r) + x*safe_log(z + r) - y*safe_atan2(x*z, y*r)) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernely(double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":49 * * cdef inline double kernely(double x, double y, double z, double r) nogil: * return -(z*safe_log(x + r) + x*safe_log(z + r) - y*safe_atan2(x*z, y*r)) # <<<<<<<<<<<<<< * * cdef inline double kernelz(double x, double y, double z, double r) nogil: */ __pyx_r = (-(((__pyx_v_z * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_x + __pyx_v_r))) + (__pyx_v_x * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_z + __pyx_v_r)))) - (__pyx_v_y * __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_x * __pyx_v_z), (__pyx_v_y * __pyx_v_r))))); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":48 * return -(y*safe_log(z + r) + z*safe_log(y + r) - x*safe_atan2(z*y, x*r)) * * cdef inline double kernely(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -(z*safe_log(x + r) + x*safe_log(z + r) - y*safe_atan2(x*z, y*r)) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":51 * return -(z*safe_log(x + r) + x*safe_log(z + r) - y*safe_atan2(x*z, y*r)) * * cdef inline double kernelz(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -(x*safe_log(y + r) + y*safe_log(x + r) - z*safe_atan2(x*y, z*r)) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelz(double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":52 * * cdef inline double kernelz(double x, double y, double z, double r) nogil: * return -(x*safe_log(y + r) + y*safe_log(x + r) - z*safe_atan2(x*y, z*r)) # <<<<<<<<<<<<<< * * cdef inline double kernelxx(double x, double y, double z, double r) nogil: */ __pyx_r = (-(((__pyx_v_x * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_y + __pyx_v_r))) + (__pyx_v_y * __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_x + __pyx_v_r)))) - (__pyx_v_z * __pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_x * __pyx_v_y), (__pyx_v_z * __pyx_v_r))))); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":51 * return -(z*safe_log(x + r) + x*safe_log(z + r) - y*safe_atan2(x*z, y*r)) * * cdef inline double kernelz(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -(x*safe_log(y + r) + y*safe_log(x + r) - z*safe_atan2(x*y, z*r)) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":54 * return -(x*safe_log(y + r) + y*safe_log(x + r) - z*safe_atan2(x*y, z*r)) * * cdef inline double kernelxx(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -safe_atan2(z*y, x*r) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelxx(double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":55 * * cdef inline double kernelxx(double x, double y, double z, double r) nogil: * return -safe_atan2(z*y, x*r) # 
<<<<<<<<<<<<<< * * cdef inline double kernelxy(double x, double y, double z, double r) nogil: */ __pyx_r = (-__pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_z * __pyx_v_y), (__pyx_v_x * __pyx_v_r))); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":54 * return -(x*safe_log(y + r) + y*safe_log(x + r) - z*safe_atan2(x*y, z*r)) * * cdef inline double kernelxx(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -safe_atan2(z*y, x*r) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":57 * return -safe_atan2(z*y, x*r) * * cdef inline double kernelxy(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return safe_log(z + r) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelxy(CYTHON_UNUSED double __pyx_v_x, CYTHON_UNUSED double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":58 * * cdef inline double kernelxy(double x, double y, double z, double r) nogil: * return safe_log(z + r) # <<<<<<<<<<<<<< * * cdef inline double kernelxz(double x, double y, double z, double r) nogil: */ __pyx_r = __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_z + __pyx_v_r)); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":57 * return -safe_atan2(z*y, x*r) * * cdef inline double kernelxy(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return safe_log(z + r) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":60 * return safe_log(z + r) * * cdef inline double kernelxz(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return safe_log(y + r) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelxz(CYTHON_UNUSED double __pyx_v_x, double __pyx_v_y, CYTHON_UNUSED double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":61 * * cdef inline double kernelxz(double x, double y, double z, double r) nogil: * return safe_log(y + r) # <<<<<<<<<<<<<< * * cdef inline double kernelyy(double x, double y, double z, double r) nogil: */ __pyx_r = __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_y + __pyx_v_r)); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":60 * return safe_log(z + r) * * cdef inline double kernelxz(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return safe_log(y + r) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":63 * return safe_log(y + r) * * cdef inline double kernelyy(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -safe_atan2(z*x, y*r) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelyy(double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":64 * * cdef inline double kernelyy(double x, double y, double z, double r) nogil: * return -safe_atan2(z*x, y*r) # <<<<<<<<<<<<<< * * cdef inline double kernelyz(double x, double y, double z, double r) nogil: */ __pyx_r = (-__pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_z * __pyx_v_x), (__pyx_v_y * __pyx_v_r))); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":63 * return safe_log(y + r) * * cdef inline double kernelyy(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -safe_atan2(z*x, y*r) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":66 * return -safe_atan2(z*x, y*r) * * cdef inline double kernelyz(double x, double y, double z, double r) nogil: # 
<<<<<<<<<<<<<< * return safe_log(x + r) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelyz(double __pyx_v_x, CYTHON_UNUSED double __pyx_v_y, CYTHON_UNUSED double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":67 * * cdef inline double kernelyz(double x, double y, double z, double r) nogil: * return safe_log(x + r) # <<<<<<<<<<<<<< * * cdef inline double kernelzz(double x, double y, double z, double r) nogil: */ __pyx_r = __pyx_f_8fatiando_7gravmag_6_prism_safe_log((__pyx_v_x + __pyx_v_r)); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":66 * return -safe_atan2(z*x, y*r) * * cdef inline double kernelyz(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return safe_log(x + r) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":69 * return safe_log(x + r) * * cdef inline double kernelzz(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -safe_atan2(x*y, z*r) * */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_6_prism_kernelzz(double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, double __pyx_v_r) { double __pyx_r; /* "fatiando/gravmag/_prism.pyx":70 * * cdef inline double kernelzz(double x, double y, double z, double r) nogil: * return -safe_atan2(x*y, z*r) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_r = (-__pyx_f_8fatiando_7gravmag_6_prism_safe_atan2((__pyx_v_x * __pyx_v_y), (__pyx_v_z * __pyx_v_r))); goto __pyx_L0; /* "fatiando/gravmag/_prism.pyx":69 * return safe_log(x + r) * * cdef inline double kernelzz(double x, double y, double z, double r) nogil: # <<<<<<<<<<<<<< * return -safe_atan2(x*y, z*r) * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":74 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_1tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_tf[] = "tf(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double mx, double my, double mz, double fx, double fy, double fz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_1tf = {__Pyx_NAMESTR("tf"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_1tf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_tf)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_1tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; double __pyx_v_fx; double __pyx_v_fy; double __pyx_v_fz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("tf (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_fx,&__pyx_n_s_fy,&__pyx_n_s_fz,&__pyx_n_s_res,0}; PyObject* 
values[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15); case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14); case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 13: if (likely((values[13] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 13); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 14: if (likely((values[14] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 14); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 15: if (likely((values[15] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, 15); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 16) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); values[13] = PyTuple_GET_ITEM(__pyx_args, 13); values[14] = PyTuple_GET_ITEM(__pyx_args, 14); values[15] = PyTuple_GET_ITEM(__pyx_args, 15); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && 
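/* Conversion idiom: __pyx_PyFloat_AsDouble returns -1 on failure, so every
 * scalar conversion is followed by a PyErr_Occurred() check to distinguish
 * a genuine -1.0 value from an error before the doubles are handed to the
 * C-level implementation. */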
PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fx = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_fx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fy = __pyx_PyFloat_AsDouble(values[13]); if (unlikely((__pyx_v_fy == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fz = __pyx_PyFloat_AsDouble(values[14]); if (unlikely((__pyx_v_fz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[15]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("tf", 1, 16, 16, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_tf(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_fx, __pyx_v_fy, __pyx_v_fz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double 
__pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_bx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_by; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_bz; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("tf", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, 
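/* Buffer setup: each numpy.ndarray[DTYPE_T, ndim=1] argument is validated
 * against the DTYPE_T type info and its stride/shape cached in diminfo[],
 * so the hot loop can index the raw memory directly; res alone is acquired
 * with PyBUF_WRITABLE, since it is the output filled in place. */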
(PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":83 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, v1, v2, v3, v4, v5, v6, bx, by, bz, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":84 * cdef DTYPE_T kernel, r, v1, v2, v3, v4, v5, v6, bx, by, bz, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) 
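/* This run of calls implements the single source line
 *   x = numpy.array([x2, x1], dtype=DTYPE)
 * box the two doubles, build the list and the dtype keyword dict, call
 * numpy.array, type-check the result as an ndarray, then rebind the typed
 * buffer for x; the same pattern repeats for y and z below. Presumably the
 * [upper, lower] ordering lets the corner loops index the two bounds
 * directly with i, j, k in {0, 1}. */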
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":85 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":86 * x = numpy.array([x2, x1], dtype=DTYPE) * y = 
numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } 
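/*
 * The acquire-with-fallback dance above is emitted every time a buffer-typed
 * local (here `z`) is rebound: release the old view, try to acquire a view on
 * the new object, and on failure re-acquire the previous object so the buffer
 * state stays consistent, preserving any pending exception across the retry.
 * A minimal sketch of the same pattern, using hypothetical helper names
 * (`acquire_view`, `new_obj`, `old_obj`) rather than the generated ones:
 *
 *   if (acquire_view(&view, new_obj) < 0) {
 *       PyObject *t, *v, *tb;
 *       PyErr_Fetch(&t, &v, &tb);             // stash the failure
 *       if (acquire_view(&view, old_obj) < 0)
 *           __Pyx_RaiseBufferFallbackError(); // both views are gone
 *       else
 *           PyErr_Restore(t, v, tb);          // re-raise the original error
 *   }
 */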
__pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":87 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":88 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_r) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_v4) lastprivate(__pyx_v_i) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_by) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_v6) lastprivate(__pyx_v_v5) lastprivate(__pyx_v_v2) lastprivate(__pyx_v_v1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_v3) lastprivate(__pyx_v_bx) lastprivate(__pyx_v_j) lastprivate(__pyx_v_bz) lastprivate(__pyx_v_kernel) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v4 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_by = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v6 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v5 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v2 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v1 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_v3 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_bx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_bz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":90 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":91 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in 
range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":92 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":93 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":94 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":95 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * v1 = kernelxx(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":96 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * v1 = kernelxx(dx, dy, dz, r) * v2 = kernelxy(dx, dy, dz, r) */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":97 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * v1 = kernelxx(dx, dy, dz, r) # <<<<<<<<<<<<<< * v2 = kernelxy(dx, dy, dz, r) * v3 = kernelxz(dx, dy, dz, r) */ __pyx_v_v1 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxx(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":98 * r = sqrt(dx**2 + dy**2 + dz**2) * v1 = kernelxx(dx, dy, dz, r) * v2 = kernelxy(dx, dy, dz, r) # <<<<<<<<<<<<<< * v3 = kernelxz(dx, dy, dz, r) * v4 = kernelyy(dx, dy, dz, r) */ __pyx_v_v2 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxy(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":99 * v1 = kernelxx(dx, dy, dz, r) * v2 = kernelxy(dx, dy, dz, r) * v3 = kernelxz(dx, dy, dz, r) # <<<<<<<<<<<<<< * v4 = kernelyy(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) */ __pyx_v_v3 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":100 * v2 = kernelxy(dx, dy, dz, r) * v3 = kernelxz(dx, dy, dz, r) * v4 = kernelyy(dx, dy, dz, r) # <<<<<<<<<<<<<< * v5 = kernelyz(dx, dy, dz, r) * v6 = kernelzz(dx, dy, dz, r) */ __pyx_v_v4 = 
__pyx_f_8fatiando_7gravmag_6_prism_kernelyy(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":101 * v3 = kernelxz(dx, dy, dz, r) * v4 = kernelyy(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) # <<<<<<<<<<<<<< * v6 = kernelzz(dx, dy, dz, r) * bx = (v1*mx + v2*my + v3*mz) */ __pyx_v_v5 = __pyx_f_8fatiando_7gravmag_6_prism_kernelyz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":102 * v4 = kernelyy(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) * v6 = kernelzz(dx, dy, dz, r) # <<<<<<<<<<<<<< * bx = (v1*mx + v2*my + v3*mz) * by = (v2*mx + v4*my + v5*mz) */ __pyx_v_v6 = __pyx_f_8fatiando_7gravmag_6_prism_kernelzz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":103 * v5 = kernelyz(dx, dy, dz, r) * v6 = kernelzz(dx, dy, dz, r) * bx = (v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * by = (v2*mx + v4*my + v5*mz) * bz = (v3*mx + v5*my + v6*mz) */ __pyx_v_bx = (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz)); /* "fatiando/gravmag/_prism.pyx":104 * v6 = kernelzz(dx, dy, dz, r) * bx = (v1*mx + v2*my + v3*mz) * by = (v2*mx + v4*my + v5*mz) # <<<<<<<<<<<<<< * bz = (v3*mx + v5*my + v6*mz) * kernel = fx*bx + fy*by + fz*bz */ __pyx_v_by = (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)); /* "fatiando/gravmag/_prism.pyx":105 * bx = (v1*mx + v2*my + v3*mz) * by = (v2*mx + v4*my + v5*mz) * bz = (v3*mx + v5*my + v6*mz) # <<<<<<<<<<<<<< * kernel = fx*bx + fy*by + fz*bz * res[l] += ((-1.)**(i + j + k))*kernel */ __pyx_v_bz = (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)); /* "fatiando/gravmag/_prism.pyx":106 * by = (v2*mx + v4*my + v5*mz) * bz = (v3*mx + v5*my + v6*mz) * kernel = fx*bx + fy*by + fz*bz # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel * */ __pyx_v_kernel = (((__pyx_v_fx * __pyx_v_bx) + (__pyx_v_fy * __pyx_v_by)) + (__pyx_v_fz * __pyx_v_bz)); /* "fatiando/gravmag/_prism.pyx":107 * bz = (v3*mx + v5*my + v6*mz) * kernel = fx*bx + fy*by + fz*bz * res[l] += ((-1.)**(i + j + k))*kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += (pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":87 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":74 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; 
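/*
 * Error path for tf(): the pending exception is stashed with __Pyx_ErrFetch,
 * every acquired Py_buffer view is released (PyBuffer_Release may run Python
 * code and must not clobber the pending error), and the exception is restored
 * before the traceback entry is added and NULL is returned to the caller.
 */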
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":111 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_3bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_2bx[] = "bx(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_3bx = {__Pyx_NAMESTR("bx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_3bx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_2bx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_3bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: 
values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; 
goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 13) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[12]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bx", 1, 13, 13, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_2bx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_2bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bx", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; 
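/*
 * bx() evaluates what is, per the .pyx source below, the x component of the
 * prism field at each station l. Every Py_buffer record is zeroed before any
 * acquisition so the error path can release them all unconditionally. The
 * loop body that follows evaluates the antiderivative at the eight prism
 * corners (x_i, y_j, z_k), i,j,k in {0,1}, with alternating signs:
 *
 *   res[l] += sum_{i,j,k} (-1)^(i+j+k) * (v1*mx + v2*my + v3*mz)
 *
 * where v1 = kernelxx, v2 = kernelxy, v3 = kernelxz are each evaluated at
 * (dx, dy, dz, r) with r = sqrt(dx^2 + dy^2 + dz^2).
 */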
__pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":120 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, v1, v2, v3, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":121 * 
cdef DTYPE_T kernel, r, v1, v2, v3, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { 
PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":122 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 
0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":123 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":124 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":125 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_k) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_v1) lastprivate(__pyx_v_r) lastprivate(__pyx_v_j) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_i) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_v3) lastprivate(__pyx_v_v2) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_dz) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_v1 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v3 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v2 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":127 * 
for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":128 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":129 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":130 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":131 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":132 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * v1 = kernelxx(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":133 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * v1 = kernelxx(dx, dy, dz, r) * v2 = kernelxy(dx, dy, dz, r) */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":134 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * v1 = kernelxx(dx, dy, dz, r) # <<<<<<<<<<<<<< * v2 = kernelxy(dx, dy, dz, r) * v3 = kernelxz(dx, dy, dz, r) */ __pyx_v_v1 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxx(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":135 * r = sqrt(dx**2 + dy**2 + dz**2) * v1 = kernelxx(dx, dy, dz, r) * v2 = kernelxy(dx, dy, dz, r) # <<<<<<<<<<<<<< * v3 = kernelxz(dx, dy, dz, r) * kernel = (v1*mx + v2*my + v3*mz) */ __pyx_v_v2 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxy(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":136 * v1 = kernelxx(dx, dy, dz, r) * v2 = kernelxy(dx, dy, dz, r) * v3 = kernelxz(dx, dy, dz, r) # <<<<<<<<<<<<<< * kernel = (v1*mx + v2*my + v3*mz) * res[l] += ((-1.)**(i + j + 
k))*kernel */ __pyx_v_v3 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":137 * v2 = kernelxy(dx, dy, dz, r) * v3 = kernelxz(dx, dy, dz, r) * kernel = (v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel * */ __pyx_v_kernel = (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz)); /* "fatiando/gravmag/_prism.pyx":138 * v3 = kernelxz(dx, dy, dz, r) * kernel = (v1*mx + v2*my + v3*mz) * res[l] += ((-1.)**(i + j + k))*kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += (pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":124 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":111 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":142 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, 
ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_5by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_4by[] = "by(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_5by = {__Pyx_NAMESTR("by"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_5by, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_4by)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_5by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("by (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno 
= __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "by") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 13) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if 
(unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[12]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("by", 1, 13, 13, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_4by(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_4by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; 
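/* NOTE (editorial annotation, not Cython output): the Python wrapper for `by`
 * ends just above; from here on we are in the implementation half
 * (__pyx_pf_8fatiando_7gravmag_6_prism_4by), which the wrapper calls after
 * unpacking and type-checking the 13 Python arguments. A sketch of the
 * underlying Cython, reassembled verbatim from the `.pyx` source comments
 * that Cython embeds further below (lines 151-169 of
 * fatiando/gravmag/_prism.pyx); anything not quoted in those comments is
 * unknown and omitted here:
 *
 *     size = len(xp)
 *     x = numpy.array([x2, x1], dtype=DTYPE)
 *     y = numpy.array([y2, y1], dtype=DTYPE)
 *     z = numpy.array([z2, z1], dtype=DTYPE)
 *     with nogil:
 *         for l in prange(size):
 *             # Evaluate the integration limits
 *             for k in range(2):
 *                 dz = z[k] - zp[l]
 *                 for j in range(2):
 *                     dy = y[j] - yp[l]
 *                     for i in range(2):
 *                         dx = x[i] - xp[l]
 *                         r = sqrt(dx**2 + dy**2 + dz**2)
 *                         v2 = kernelxy(dx, dy, dz, r)
 *                         v4 = kernelyy(dx, dy, dz, r)
 *                         v5 = kernelyz(dx, dy, dz, r)
 *                         kernel = (v2*mx + v4*my + v5*mz)
 *                         res[l] += ((-1.)**(i + j + k))*kernel
 *
 * Each observation point l thus accumulates the kernel over the 8 prism
 * corners (i, j, k in {0, 1}) with alternating sign (-1)**(i+j+k), i.e. a
 * triple definite integral evaluated at its limits. */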
PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("by", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":151 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, v2, v4, v5, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":152 * cdef DTYPE_T kernel, r, v2, v4, v5, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":153 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":154 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); 
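/* NOTE (editorial annotation): each `numpy.array([..., ...], dtype=DTYPE)`
 * line of the .pyx expands to the same generated sequence seen here: look up
 * the `numpy` module global, fetch its `array` attribute, build the
 * two-element list and the argument tuple, build a kwargs dict holding
 * `dtype=DTYPE`, call, type-check the result against numpy.ndarray, and
 * re-acquire a buffer view on it (__Pyx_GetBufferAndValidate with PyBUF_*
 * flags) so the hot loop below can index through raw strides instead of
 * Python-level indexing. The `with nogil: for l in prange(size)` block that
 * follows is emitted as `#pragma omp parallel` plus `#pragma omp for`, with
 * the per-iteration temporaries marked firstprivate/lastprivate so every
 * OpenMP thread works on its own copies. */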
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":155 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the 
integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":156 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_r) lastprivate(__pyx_v_kernel) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_k) lastprivate(__pyx_v_j) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_i) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_v4) lastprivate(__pyx_v_v2) lastprivate(__pyx_v_v5) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v4 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v2 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v5 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":158 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":159 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":160 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":161 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, 
__pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":162 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":163 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * v2 = kernelxy(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":164 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * v2 = kernelxy(dx, dy, dz, r) * v4 = kernelyy(dx, dy, dz, r) */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":165 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * v2 = kernelxy(dx, dy, dz, r) # <<<<<<<<<<<<<< * v4 = kernelyy(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) */ __pyx_v_v2 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxy(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":166 * r = sqrt(dx**2 + dy**2 + dz**2) * v2 = kernelxy(dx, dy, dz, r) * v4 = kernelyy(dx, dy, dz, r) # <<<<<<<<<<<<<< * v5 = kernelyz(dx, dy, dz, r) * kernel = (v2*mx + v4*my + v5*mz) */ __pyx_v_v4 = __pyx_f_8fatiando_7gravmag_6_prism_kernelyy(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":167 * v2 = kernelxy(dx, dy, dz, r) * v4 = kernelyy(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) # <<<<<<<<<<<<<< * kernel = (v2*mx + v4*my + v5*mz) * res[l] += ((-1.)**(i + j + k))*kernel */ __pyx_v_v5 = __pyx_f_8fatiando_7gravmag_6_prism_kernelyz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":168 * v4 = kernelyy(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) * kernel = (v2*mx + v4*my + v5*mz) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel * */ __pyx_v_kernel = (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)); /* "fatiando/gravmag/_prism.pyx":169 * v5 = kernelyz(dx, dy, dz, r) * kernel = (v2*mx + v4*my + v5*mz) * res[l] += ((-1.)**(i + j + k))*kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += (pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":155 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* 
"fatiando/gravmag/_prism.pyx":142 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":173 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_7bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_6bz[] = "bz(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_7bz = {__Pyx_NAMESTR("bz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_7bz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_6bz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_7bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; 
PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 13) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[12]); } goto __pyx_L4_argument_unpacking_done; 
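/* NOTE (editorial annotation): the scalar coercions above follow the CPython
 * error convention: __pyx_PyFloat_AsDouble() returns -1.0 both as a
 * legitimate value and as its error sentinel, so the extra PyErr_Occurred()
 * check is what actually distinguishes "the argument really was -1.0" from
 * "the conversion raised". A hypothetical call from Python, for illustration
 * only (the argument values and the module's DTYPE -- presumably
 * numpy.float64 -- are assumptions, not taken from this file):
 *
 *     import numpy
 *     from fatiando.gravmag import _prism
 *     xp = numpy.array([0.0]); yp = numpy.array([0.0])
 *     zp = numpy.array([-10.0])
 *     res = numpy.zeros_like(xp)
 *     _prism.bz(xp, yp, zp, -1., 1., -1., 1., 0., 2., 0., 0., 1., res)
 *     # res now holds the accumulated bz kernel at each observation point
 */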
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bz", 1, 13, 13, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_6bz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_6bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned 
int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bz", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = 
__pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":182 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, v3, v5, v6, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":183 * cdef DTYPE_T kernel, r, v3, v5, v6, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, 
(PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":184 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 
0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":185 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":186 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":187 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_v3) lastprivate(__pyx_v_j) lastprivate(__pyx_v_v5) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_v6) lastprivate(__pyx_v_i) lastprivate(__pyx_v_k) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_r) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dz = 
((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v3 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_v5 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_v6 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":189 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":190 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":191 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":192 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":193 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":194 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * v3 = kernelxz(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":195 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * v3 = kernelxz(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":196 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * v3 = kernelxz(dx, dy, dz, r) # <<<<<<<<<<<<<< * v5 = kernelyz(dx, dy, dz, r) * v6 = kernelzz(dx, dy, dz, r) */ __pyx_v_v3 = __pyx_f_8fatiando_7gravmag_6_prism_kernelxz(__pyx_v_dx, 
__pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":197 * r = sqrt(dx**2 + dy**2 + dz**2) * v3 = kernelxz(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) # <<<<<<<<<<<<<< * v6 = kernelzz(dx, dy, dz, r) * kernel = (v3*mx + v5*my + v6*mz) */ __pyx_v_v5 = __pyx_f_8fatiando_7gravmag_6_prism_kernelyz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":198 * v3 = kernelxz(dx, dy, dz, r) * v5 = kernelyz(dx, dy, dz, r) * v6 = kernelzz(dx, dy, dz, r) # <<<<<<<<<<<<<< * kernel = (v3*mx + v5*my + v6*mz) * res[l] += ((-1.)**(i + j + k))*kernel */ __pyx_v_v6 = __pyx_f_8fatiando_7gravmag_6_prism_kernelzz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":199 * v5 = kernelyz(dx, dy, dz, r) * v6 = kernelzz(dx, dy, dz, r) * kernel = (v3*mx + v5*my + v6*mz) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel * */ __pyx_v_kernel = (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)); /* "fatiando/gravmag/_prism.pyx":200 * v6 = kernelzz(dx, dy, dz, r) * kernel = (v3*mx + v5*my + v6*mz) * res[l] += ((-1.)**(i + j + k))*kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += (pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":186 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":173 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); 
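/*
 * The nogil prange over l above lowers to the plain OpenMP pattern below
 * (a sketch with simplified names, assuming contiguous double arrays; the
 * generated code additionally routes every access through Cython's
 * strided-buffer macros and initializes each lastprivate temporary to an
 * invalid value at the top of the parallel body):
 *
 *   #pragma omp parallel for
 *   for (l = 0; l < size; l++)
 *       for (k = 0; k < 2; k++)
 *           for (j = 0; j < 2; j++)
 *               for (i = 0; i < 2; i++) {
 *                   dx = x[i] - xp[l]; dy = y[j] - yp[l]; dz = z[k] - zp[l];
 *                   r  = sqrt(dx*dx + dy*dy + dz*dz);
 *                   res[l] += pow(-1.0, i + j + k)
 *                           * (kernelxz(dx, dy, dz, r) * mx
 *                            + kernelyz(dx, dy, dz, r) * my
 *                            + kernelzz(dx, dy, dz, r) * mz);
 *               }
 *
 * That is, the analytic prism field at observation point l is a sum over
 * the prism's 8 corners with alternating signs, which is why x, y and z
 * each hold only the two integration limits. Both exit paths release every
 * acquired buffer exactly once; the error path saves and restores the
 * pending exception around the releases so cleanup cannot mask it.
 */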
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":204 * @cython.wraparound(False) * @cython.boundscheck(False) * def gx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_9gx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_8gx[] = "gx(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_9gx = {__Pyx_NAMESTR("gx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_9gx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_8gx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_9gx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if 
(unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gx", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_8gx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_8gx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND 
__pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gx", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = 
__pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":213 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":214 * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno 
= 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":215 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":216 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":217 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":218 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for 
lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dy) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_i) lastprivate(__pyx_v_r) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_k) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_j = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_prism.pyx":220 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":221 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":222 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":223 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":224 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":225 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelx(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":226 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelx(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + 
k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":227 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelx(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelx(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":228 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelx(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":217 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":204 * @cython.wraparound(False) * @cython.boundscheck(False) * def gx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":232 * @cython.wraparound(False) * @cython.boundscheck(False) * def gy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * 
numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_11gy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_10gy[] = "gy(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_11gy = {__Pyx_NAMESTR("gy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_11gy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_10gy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_11gy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; 
__pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gy", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_10gy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_10gy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int 
__pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gy", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } 
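/*
 * Buffer acquisition pattern: each ndarray argument is exported once
 * through the buffer protocol and validated against DTYPE_T
 * (PyBUF_FORMAT | PyBUF_STRIDES); only `res` also requests
 * PyBUF_WRITABLE, since it is the output accumulator. The strides and
 * shape cached into diminfo below feed the __Pyx_BufPtrStrided1d
 * indexing macros, which is what lets the prange body run without the
 * GIL and without any per-element Python API calls.
 */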
__pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":241 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":242 * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; 
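/* x, y, z are built as the 2-element arrays [x2, x1], [y2, y1], [z2, z1]:
 * index 0 holds the upper integration limit, index 1 the lower one. The
 * triple loop further down then realizes the closed-form evaluation of the
 * prism volume integral at its eight corners,
 *
 *     res[l] += sum over i,j,k in {0,1} of
 *               (-1)^(i+j+k) * kernely(x[i]-xp[l], y[j]-yp[l], z[k]-zp[l], r)
 *               * density,   with r = sqrt(dx^2 + dy^2 + dz^2).
 *
 * With the limits ordered [upper, lower], the (-1)^(i+j+k) factor is the
 * usual alternating sign from evaluating an antiderivative between bounds;
 * only `density` appears here, so the gravitational constant and unit
 * conversion are presumably applied by the Python-level caller. */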
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":243 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":244 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":245 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":246 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_j) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_i) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_r) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_k) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_dz = 
((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_prism.pyx":248 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":249 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":250 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":251 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":252 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":253 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernely(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":254 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernely(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":255 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernely(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernely(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":256 * r = sqrt(dx**2 + 
dy**2 + dz**2) * kernel = kernely(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":245 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":232 * @cython.wraparound(False) * @cython.boundscheck(False) * def gy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":260 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_13gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_12gz[] = "gz(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef 
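/* gz: same 11-argument entry point as gy (see the docstring above). The
 * wrapper that follows parses xp, yp, zp, the six prism bounds, density and
 * the preallocated output res, then dispatches to the typed implementation,
 * which differs from gy only in calling kernelz instead of kernely. */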
__pyx_mdef_8fatiando_7gravmag_6_prism_13gz = {__Pyx_NAMESTR("gz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_13gz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_12gz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_13gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; 
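/* Note the deliberate fall-through in both switches over pos_args: with no
 * break statements, every positional argument is copied first and each
 * remaining parameter name is then looked up in __pyx_kwds; a miss reports
 * exactly which argument is absent via
 * __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, n), n being its index. */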
else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 263; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
260; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 261; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 262; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_12gz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_12gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gz", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; 
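/* From here the gz body repeats the gy template verbatim with kernelz
 * substituted for kernely. Hand-written C would typically factor the shared
 * corner sum through a function pointer -- a hypothetical sketch (names
 * invented for illustration, not how Cython generates the code):
 *
 *     typedef double (*prism_kernel)(double dx, double dy, double dz,
 *                                    double r);
 *     // Accumulate one field component at observation point (px, py, pz);
 *     // x, y, z are the 2-element [upper, lower] limit arrays.
 *     static double corner_sum(const double x[2], const double y[2],
 *                              const double z[2], double px, double py,
 *                              double pz, double density, prism_kernel kern) {
 *         double acc = 0.0;
 *         for (int kk = 0; kk < 2; kk++)
 *             for (int jj = 0; jj < 2; jj++)
 *                 for (int ii = 0; ii < 2; ii++) {
 *                     double dx = x[ii] - px, dy = y[jj] - py,
 *                            dz = z[kk] - pz;
 *                     double r = sqrt(dx*dx + dy*dy + dz*dz);
 *                     double sign = ((ii + jj + kk) % 2) ? -1.0 : 1.0;
 *                     acc += sign * kern(dx, dy, dz, r) * density;
 *                 }
 *         return acc;
 *     }
 *
 * Cython instead re-expands the loop per function, which keeps every kernel
 * call direct and inlinable at the cost of code size. */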
__pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":269 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 269; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":270 * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, 
__pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 270; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":271 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = 
__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":272 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
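/* The __Pyx_GOTREF/__Pyx_GIVEREF calls sprinkled through these blocks are
 * RefNanny instrumentation and compile to no-ops unless the module is built
 * with CYTHON_REFNANNY; the operative discipline is that each PyFloat,
 * PyList and PyTuple temporary is either stolen by PyList_SET_ITEM /
 * PyTuple_SET_ITEM (hence GIVEREF) or released with exactly one
 * __Pyx_DECREF once consumed. */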
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":273 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":274 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dy) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_r) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_k) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_j = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_prism.pyx":276 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = 
z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":277 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":278 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":279 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":280 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":281 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelz(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":282 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":283 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelz(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":284 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && 
(__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":273 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":260 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":288 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_15gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_14gxx[] = "gxx(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_15gxx = {__Pyx_NAMESTR("gxx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_15gxx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_14gxx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_15gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double 
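/* gxx opens the gradient-tensor block: it reuses the identical 11-argument
 * wrapper and corner-sum body with its own kernel (kernelxx), and is
 * presumably followed in the full file by the remaining tensor components
 * generated from the same template. */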
__pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 
1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 292; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxx", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, 
"yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 293; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_14gxx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_14gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxx", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; 
__pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":297 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 297; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":298 * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
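  /* Editor's note -- reference sketch, not part of the generated code: the
     buffer and refcount machinery in this function expands from the short
     Cython loop quoted in the .pyx comments.  Stripped of that machinery,
     the computation is the plain C below.  `kernelxx` stands for the
     module's cdef kernel (invoked further down through its mangled name
     __pyx_f_8fatiando_7gravmag_6_prism_kernelxx); treating it as a plain
     double(double, double, double, double) function is an assumption. */
  #if 0
  static void gxx_sketch(const double *xp, const double *yp, const double *zp,
                         size_t size, double x1, double x2, double y1,
                         double y2, double z1, double z2, double density,
                         double *res)
  {
      /* Corners are ordered [upper, lower] so the parity of i + j + k gives
         the sign of each corner's contribution. */
      const double x[2] = {x2, x1}, y[2] = {y2, y1}, z[2] = {z2, z1};
      size_t l;
      for (l = 0; l < size; l++) {     /* run in parallel via prange/OpenMP */
          int i, j, k;
          for (k = 0; k < 2; k++) {
              double dz = z[k] - zp[l];
              for (j = 0; j < 2; j++) {
                  double dy = y[j] - yp[l];
                  for (i = 0; i < 2; i++) {
                      double dx = x[i] - xp[l];
                      double r = sqrt(dx*dx + dy*dy + dz*dz);
                      /* (-1)**(i + j + k): +1 for even parity, -1 for odd */
                      double sign = ((i + j + k) % 2 == 0) ? 1.0 : -1.0;
                      res[l] += sign * kernelxx(dx, dy, dz, r) * density;
                  }
              }
          }
      }
  }
  #endif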
__Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 298; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = 
((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":299 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); 
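          /* Buffer-fallback path: re-acquiring the previous value's buffer
             also failed, so the exception saved by PyErr_Fetch above is
             discarded and __Pyx_RaiseBufferFallbackError is raised instead;
             when re-acquisition succeeds, the else-branch below restores the
             original exception via PyErr_Restore. */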
Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 299; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":300 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 300; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":301 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":302 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_i) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_j) lastprivate(__pyx_v_r) lastprivate(__pyx_v_k) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_kernel) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":304 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":305 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":306 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":307 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":308 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":309 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelxx(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":310 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelxx(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":311 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelxx(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelxx(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":312 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelxx(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":301 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":288 * 
@cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":316 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_17gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_16gxy[] = "gxy(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_17gxy = {__Pyx_NAMESTR("gxy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_17gxy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_16gxy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_17gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; 
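    /* Argument-unpacking pattern: the first switch below enters at the number
       of positional arguments and deliberately falls through, so a call with,
       say, three positional arguments fills values[2], values[1], values[0];
       the second switch then resolves the remaining parameters by keyword and
       reports the first one that is missing. */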
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxy") < 0)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 319; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 320; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxy", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 318; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = 
__pyx_pf_8fatiando_7gravmag_6_prism_16gxy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_16gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_tmp1; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_tmp2; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; int __pyx_t_23; int __pyx_t_24; int __pyx_t_25; int __pyx_t_26; unsigned int __pyx_t_27; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxy", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; 
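  /* Editor's note -- reference sketch, not part of the generated code: gxy
     differs from gxx only in its kernel and in a singularity guard inside the
     prange loop below.  When the observation point lies on the vertical line
     through a corner (dx == 0 and dy == 0) with the corner above it (dz < 0),
     the xy kernel would be singular, so the .pyx source nudges the horizontal
     offsets by 1e-5 of the prism's extent.  In plain C the guard reads
     roughly as follows. */
  #if 0
  if (dx == 0.0 && dy == 0.0 && dz < 0.0) {
      double tmp1 = 0.00001 * (x2 - x1);   /* small fraction of the x extent */
      double tmp2 = 0.00001 * (y2 - y1);   /* small fraction of the y extent */
      r = sqrt(tmp1*tmp1 + tmp2*tmp2 + dz*dz);
  } else {
      r = sqrt(dx*dx + dy*dy + dz*dz);
  }
  #endif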
__pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":325 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz, tmp1, tmp2 * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 325; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":326 * cdef DTYPE_T kernel, r, dx, dy, dz, tmp1, tmp2 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = 
PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 326; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":327 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = 
__Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = 
__pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":328 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
        Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10);
        __Pyx_RaiseBufferFallbackError();
      } else {
        PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10);
      }
    }
    __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0];
    __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0];
    if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_t_6 = 0;
  __pyx_v_z = ((PyArrayObject *)__pyx_t_2);
  __pyx_t_2 = 0;

  /* "fatiando/gravmag/_prism.pyx":329
 *     y = numpy.array([y2, y1], dtype=DTYPE)
 *     z = numpy.array([z2, z1], dtype=DTYPE)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         for l in prange(size):
 *             # Evaluate the integration limits
 */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      #endif
      /*try:*/ {

        /* "fatiando/gravmag/_prism.pyx":330
 *     z = numpy.array([z2, z1], dtype=DTYPE)
 *     with nogil:
 *         for l in prange(size):             # <<<<<<<<<<<<<<
 *             # Evaluate the integration limits
 *             for k in range(2):
 */
        __pyx_t_11 = __pyx_v_size;
        if (1 == 0) abort();
        {
            #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
                #undef likely
                #undef unlikely
                #define likely(x)   (x)
                #define unlikely(x) (x)
            #endif
            __pyx_t_13 = (__pyx_t_11 - 0) / 1;
            if (__pyx_t_13 > 0)
            {
                #ifdef _OPENMP
                #pragma omp parallel private(__pyx_t_26, __pyx_t_18, __pyx_t_23, __pyx_t_25, __pyx_t_20, __pyx_t_27, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_24, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15)
                #endif /* _OPENMP */
                {
                    #ifdef _OPENMP
                    #pragma omp for lastprivate(__pyx_v_j) lastprivate(__pyx_v_tmp1) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_i) lastprivate(__pyx_v_dx) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_tmp2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_r)
                    #endif /* _OPENMP */
                    for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){
                        {
                            __pyx_v_l = 0 + 1 * __pyx_t_12;
                            /* Initialize private variables to invalid values */
                            __pyx_v_j = ((unsigned int)0xbad0bad0);
                            __pyx_v_tmp1 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN());
                            __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN());
                            __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN());
                            __pyx_v_i = ((unsigned int)0xbad0bad0);
                            __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN());
                            __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN());
                            __pyx_v_tmp2 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN());
                            __pyx_v_k = ((unsigned int)0xbad0bad0);
                            __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN());

                            /* "fatiando/gravmag/_prism.pyx":332
 *         for l in prange(size):
 *             # Evaluate the integration limits
 *             for k in range(2):             # <<<<<<<<<<<<<<
 *                 dz = z[k] - zp[l]
 *                 for j in range(2):
 */
                            for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) {
                              __pyx_v_k = __pyx_t_14;

                              /* "fatiando/gravmag/_prism.pyx":333
 *             # Evaluate the integration limits
 *             for k in range(2):
 *                 dz = z[k] - zp[l]             # <<<<<<<<<<<<<<
 *                 for j in range(2):
 *                     dy = y[j] - yp[l]
 */
                              __pyx_t_15 = __pyx_v_k;
                              __pyx_t_16 = __pyx_v_l;
                              __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides)));

                              /* "fatiando/gravmag/_prism.pyx":334
 *             for k in range(2):
 *                 dz = z[k] - zp[l]
 *                 for j in range(2):             # <<<<<<<<<<<<<<
 *                     dy = y[j] - yp[l]
 *                     for i in range(2):
 */
                              for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) {
                                __pyx_v_j = __pyx_t_17;

                                /* "fatiando/gravmag/_prism.pyx":335
 *                 dz = z[k] - zp[l]
 *                 for j in range(2):
 *                     dy = y[j] - yp[l]             # <<<<<<<<<<<<<<
 *                     for i in range(2):
 *                         dx = x[i] - xp[l]
 */
                                __pyx_t_18 = __pyx_v_j;
                                __pyx_t_19 = __pyx_v_l;
                                __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides)));

                                /* "fatiando/gravmag/_prism.pyx":336
 *                 for j in range(2):
 *                     dy = y[j] - yp[l]
 *                     for i in range(2):             # <<<<<<<<<<<<<<
 *                         dx = x[i] - xp[l]
 *                         if dx == 0 and dy == 0 and dz < 0:
 */
                                for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) {
                                  __pyx_v_i = __pyx_t_20;

                                  /* "fatiando/gravmag/_prism.pyx":337
 *                     dy = y[j] - yp[l]
 *                     for i in range(2):
 *                         dx = x[i] - xp[l]             # <<<<<<<<<<<<<<
 *                         if dx == 0 and dy == 0 and dz < 0:
 *                             tmp1 = 0.00001*(x2 - x1)
 */
                                  __pyx_t_21 = __pyx_v_i;
                                  __pyx_t_22 = __pyx_v_l;
                                  __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides)));

                                  /* "fatiando/gravmag/_prism.pyx":338
 *                     for i in range(2):
 *                         dx = x[i] - xp[l]
 *                         if dx == 0 and dy == 0 and dz < 0:             # <<<<<<<<<<<<<<
 *                             tmp1 = 0.00001*(x2 - x1)
 *                             tmp2 = 0.00001*(y2 - y1)
 */
                                  __pyx_t_23 = ((__pyx_v_dx == 0.0) != 0);
                                  if (__pyx_t_23) {
                                    __pyx_t_24 = ((__pyx_v_dy == 0.0) != 0);
                                    if (__pyx_t_24) {
                                      __pyx_t_25 = ((__pyx_v_dz < 0.0) != 0);
                                      __pyx_t_26 = __pyx_t_25;
                                    } else {
                                      __pyx_t_26 = __pyx_t_24;
                                    }
                                    __pyx_t_24 = __pyx_t_26;
                                  } else {
                                    __pyx_t_24 = __pyx_t_23;
                                  }
                                  if (__pyx_t_24) {

                                    /* "fatiando/gravmag/_prism.pyx":339
 *                         dx = x[i] - xp[l]
 *                         if dx == 0 and dy == 0 and dz < 0:
 *                             tmp1 = 0.00001*(x2 - x1)             # <<<<<<<<<<<<<<
 *                             tmp2 = 0.00001*(y2 - y1)
 *                             r = sqrt(tmp1**2 + tmp2**2 + dz**2)
 */
                                    __pyx_v_tmp1 = (0.00001 * (__pyx_v_x2 - __pyx_v_x1));

                                    /* "fatiando/gravmag/_prism.pyx":340
 *                         if dx == 0 and dy == 0 and dz < 0:
 *                             tmp1 = 0.00001*(x2 - x1)
 *                             tmp2 = 0.00001*(y2 - y1)             # <<<<<<<<<<<<<<
 *                             r = sqrt(tmp1**2 + tmp2**2 + dz**2)
 *                         else:
 */
                                    __pyx_v_tmp2 = (0.00001 * (__pyx_v_y2 - __pyx_v_y1));

                                    /* "fatiando/gravmag/_prism.pyx":341
 *                             tmp1 = 0.00001*(x2 - x1)
 *                             tmp2 = 0.00001*(y2 - y1)
 *                             r = sqrt(tmp1**2 + tmp2**2 + dz**2)             # <<<<<<<<<<<<<<
 *                         else:
 *                             r = sqrt(dx**2 + dy**2 + dz**2)
 */
                                    __pyx_v_r = sqrt(((pow(__pyx_v_tmp1, 2.0) + pow(__pyx_v_tmp2, 2.0)) + pow(__pyx_v_dz, 2.0)));
                                    goto __pyx_L16;
                                  }
                                  /*else*/ {

                                    /* "fatiando/gravmag/_prism.pyx":343
 *                             r = sqrt(tmp1**2 + tmp2**2 + dz**2)
 *                         else:
 *                             r = sqrt(dx**2 + dy**2 + dz**2)             # <<<<<<<<<<<<<<
 *                             kernel = kernelxy(dx, dy, dz, r)
 *                             res[l] += ((-1.)**(i + j + k))*kernel*density
 */
                                    __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0)));
                                  }
                                  __pyx_L16:;

                                  /* "fatiando/gravmag/_prism.pyx":344
 *                         else:
 *                             r = sqrt(dx**2 + dy**2 + dz**2)
 * 
kernel = kernelxy(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelxy(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":345 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelxy(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_27 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":329 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":316 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":349 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_19gxz(PyObject *__pyx_self, 
PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_18gxz[] = "gxz(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_19gxz = {__Pyx_NAMESTR("gxz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_19gxz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_18gxz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_19gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
353; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_18gxz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_18gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_tmp1; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_tmp2; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int 
__pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; int __pyx_t_23; int __pyx_t_24; int __pyx_t_25; int __pyx_t_26; unsigned int __pyx_t_27; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxz", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = 
__pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":358 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz, tmp1, tmp2 * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 358; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":359 * cdef DTYPE_T kernel, r, dx, dy, dz, tmp1, tmp2 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; 
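/* Note: gxz mirrors gxy above -- build the 2-element integration-limit arrays
 * x = [x2, x1], y = [y2, y1], z = [z2, z1], then run a nogil prange over the
 * observation points, accumulating (-1)**(i+j+k) * kernelxz(...) * density
 * over the prism's eight corners. Rebinding a typed buffer variable such as
 * `x` follows the fixed generated pattern seen here: release the old view,
 * validate the new ndarray against the declared dtype/ndim, and on failure
 * attempt to restore the previous buffer before raising
 * __Pyx_RaiseBufferFallbackError(). The same pattern repeats for y and z. */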
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 359; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":360 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":361 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; 
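/* __pyx_lineno records the .pyx source line (361 here) and __pyx_clineno the
 * line in this generated file; both are handed to __Pyx_AddTraceback() on the
 * error path so the Python traceback points at the Cython source rather than
 * at this C file. */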
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 361; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":362 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":363 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_26, __pyx_t_18, __pyx_t_23, __pyx_t_25, __pyx_t_20, __pyx_t_27, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_24, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_i) lastprivate(__pyx_v_r) lastprivate(__pyx_v_tmp1) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_k) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_dx) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_tmp2) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values 
*/ __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_tmp1 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_tmp2 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_j = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_prism.pyx":365 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":366 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":367 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":368 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":369 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * if dx == 0 and dz == 0 and dy < 0: */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":370 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * if dx == 0 and dz == 0 and dy < 0: * tmp1 = 0.00001*(x2 - x1) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":371 * for i in range(2): * dx = x[i] - xp[l] * if dx == 0 and dz == 0 and dy < 0: # <<<<<<<<<<<<<< * tmp1 = 0.00001*(x2 - x1) * tmp2 = 0.00001*(z2 - z1) */ __pyx_t_23 = ((__pyx_v_dx == 0.0) != 0); if (__pyx_t_23) { __pyx_t_24 = ((__pyx_v_dz == 0.0) != 0); if (__pyx_t_24) { __pyx_t_25 = ((__pyx_v_dy < 0.0) != 0); __pyx_t_26 = __pyx_t_25; } else { __pyx_t_26 = __pyx_t_24; } __pyx_t_24 = __pyx_t_26; } else { __pyx_t_24 = 
__pyx_t_23; } if (__pyx_t_24) { /* "fatiando/gravmag/_prism.pyx":372 * dx = x[i] - xp[l] * if dx == 0 and dz == 0 and dy < 0: * tmp1 = 0.00001*(x2 - x1) # <<<<<<<<<<<<<< * tmp2 = 0.00001*(z2 - z1) * r = sqrt(tmp1**2 + tmp2**2 + dy**2) */ __pyx_v_tmp1 = (0.00001 * (__pyx_v_x2 - __pyx_v_x1)); /* "fatiando/gravmag/_prism.pyx":373 * if dx == 0 and dz == 0 and dy < 0: * tmp1 = 0.00001*(x2 - x1) * tmp2 = 0.00001*(z2 - z1) # <<<<<<<<<<<<<< * r = sqrt(tmp1**2 + tmp2**2 + dy**2) * else: */ __pyx_v_tmp2 = (0.00001 * (__pyx_v_z2 - __pyx_v_z1)); /* "fatiando/gravmag/_prism.pyx":374 * tmp1 = 0.00001*(x2 - x1) * tmp2 = 0.00001*(z2 - z1) * r = sqrt(tmp1**2 + tmp2**2 + dy**2) # <<<<<<<<<<<<<< * else: * r = sqrt(dx**2 + dy**2 + dz**2) */ __pyx_v_r = sqrt(((pow(__pyx_v_tmp1, 2.0) + pow(__pyx_v_tmp2, 2.0)) + pow(__pyx_v_dy, 2.0))); goto __pyx_L16; } /*else*/ { /* "fatiando/gravmag/_prism.pyx":376 * r = sqrt(tmp1**2 + tmp2**2 + dy**2) * else: * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelxz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); } __pyx_L16:; /* "fatiando/gravmag/_prism.pyx":377 * else: * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelxz(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelxz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":378 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelxz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_27 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":362 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":349 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); 
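/* Error path for gxz: all acquired buffer views are released while the
 * pending exception is parked via __Pyx_ErrFetch()/__Pyx_ErrRestore(), so
 * releasing the buffers cannot clobber the exception state before
 * __Pyx_AddTraceback() runs. */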
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":382 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_21gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_20gyy[] = "gyy(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_21gyy = {__Pyx_NAMESTR("gyy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_21gyy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_20gyy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_21gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if 
(likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyy", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 387; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_20gyy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_20gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; 
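/* Per-point locals for gyy. The cdef visible in the _prism.pyx excerpt below
 * declares only kernel, r, dx, dy, dz -- no tmp1/tmp2 -- so the singularity
 * shift used by gxy and gxz above is apparently not generated for this
 * component (its loop body falls outside this excerpt). */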
__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyy", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } 
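/* After each successful acquisition the buffer's strides and shape are cached
 * in diminfo[0]; the nogil loops then index through __Pyx_BufPtrStrided1d()
 * using these cached strides, so no Python API is touched per element. */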
__pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":391 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":392 * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":393 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); 
__Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 393; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":394 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = 
PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":395 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":396 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate 
the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_dy) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_i) lastprivate(__pyx_v_r) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_k) lastprivate(__pyx_v_j) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_dz) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":398 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":399 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":400 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":401 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":402 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":403 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelyy(dx, dy, dz, r) */ __pyx_t_21 = 
__pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":404 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelyy(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":405 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelyy(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelyy(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":406 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelyy(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":395 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":382 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":410 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_23gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_22gyz[] = "gyz(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_23gyz = {__Pyx_NAMESTR("gyz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_23gyz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_22gyz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_23gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; 
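/*
 * __Pyx_RaiseArgtupleInvalid(name, exact, min, max, found) raises the
 * TypeError for a bad argument count. gyz takes exactly 11 arguments,
 * hence min == max == 11; the final value reports how many arguments
 * were effectively supplied (here, the index at which a required
 * keyword turned out to be missing).
 */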
goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; 
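/*
 * The else-branch just below is the fast path: exactly 11 positional
 * arguments and, when __pyx_kwds is NULL, no keyword processing at all.
 * The values are copied straight out of the args tuple before the
 * per-argument float conversions that follow.
 */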
__pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 413; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 414; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 415; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_22gyz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_22gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_tmp1; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_tmp2; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND 
__pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; int __pyx_t_23; int __pyx_t_24; int __pyx_t_25; int __pyx_t_26; unsigned int __pyx_t_27; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyz", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":419 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz, tmp1, tmp2 * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 419; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":420 * cdef DTYPE_T kernel, r, dx, dy, dz, tmp1, tmp2 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, 
__pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":421 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); 
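/*
 * Reference accounting: __Pyx_GOTREF() tells Cython's refnanny that
 * this code now owns a reference, and __Pyx_GIVEREF() that the
 * reference was just donated to a container (PyTuple_SET_ITEM steals
 * the reference to __pyx_t_3). Zeroing the temporary immediately
 * afterwards keeps the error path from DECREF-ing it a second time.
 */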
__pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":422 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":423 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":424 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif 
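/*
 * `with nogil: for l in prange(size):` lowers to the OpenMP region
 * below: the trip count is computed as (size - 0) / 1, the iterations
 * are distributed with `#pragma omp for`, loop temporaries are
 * privatized, and every private is first set to a sentinel (NaN for
 * floats, 0xbad0bad0 for indices) so a read of a never-assigned value
 * is conspicuous. Reconstructed from the .pyx fragments quoted in the
 * comments below, the source being compiled here is, in outline:
 *
 *     with nogil:
 *         for l in prange(size):
 *             for k in range(2):
 *                 dz = z[k] - zp[l]
 *                 for j in range(2):
 *                     dy = y[j] - yp[l]
 *                     for i in range(2):
 *                         dx = x[i] - xp[l]
 *                         if dy == 0 and dz == 0 and dx < 0:
 *                             # apparently a guard against a singular
 *                             # kernel evaluation on this line: nudge
 *                             # by 1e-5 of the prism's y and z extents
 *                             tmp1 = 0.00001*(y2 - y1)
 *                             tmp2 = 0.00001*(z2 - z1)
 *                             r = sqrt(tmp1**2 + tmp2**2 + dx**2)
 *                         else:
 *                             r = sqrt(dx**2 + dy**2 + dz**2)
 *                         kernel = kernelyz(dx, dy, dz, r)
 *                         res[l] += ((-1.)**(i + j + k))*kernel*density
 *
 * No OpenMP reduction is needed: each iteration writes only res[l].
 */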
__pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_26, __pyx_t_18, __pyx_t_23, __pyx_t_25, __pyx_t_20, __pyx_t_27, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_24, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_tmp1) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_i) lastprivate(__pyx_v_r) lastprivate(__pyx_v_tmp2) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_k) lastprivate(__pyx_v_j) lastprivate(__pyx_v_kernel) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_tmp1 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_tmp2 = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":426 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":427 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":428 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":429 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":430 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * if dy == 0 and dz == 0 and dx < 0: */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":431 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * if dy == 0 and dz == 0 and dx < 0: * tmp1 = 0.00001*(y2 - y1) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = 
((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":432 * for i in range(2): * dx = x[i] - xp[l] * if dy == 0 and dz == 0 and dx < 0: # <<<<<<<<<<<<<< * tmp1 = 0.00001*(y2 - y1) * tmp2 = 0.00001*(z2 - z1) */ __pyx_t_23 = ((__pyx_v_dy == 0.0) != 0); if (__pyx_t_23) { __pyx_t_24 = ((__pyx_v_dz == 0.0) != 0); if (__pyx_t_24) { __pyx_t_25 = ((__pyx_v_dx < 0.0) != 0); __pyx_t_26 = __pyx_t_25; } else { __pyx_t_26 = __pyx_t_24; } __pyx_t_24 = __pyx_t_26; } else { __pyx_t_24 = __pyx_t_23; } if (__pyx_t_24) { /* "fatiando/gravmag/_prism.pyx":433 * dx = x[i] - xp[l] * if dy == 0 and dz == 0 and dx < 0: * tmp1 = 0.00001*(y2 - y1) # <<<<<<<<<<<<<< * tmp2 = 0.00001*(z2 - z1) * r = sqrt(tmp1**2 + tmp2**2 + dx**2) */ __pyx_v_tmp1 = (0.00001 * (__pyx_v_y2 - __pyx_v_y1)); /* "fatiando/gravmag/_prism.pyx":434 * if dy == 0 and dz == 0 and dx < 0: * tmp1 = 0.00001*(y2 - y1) * tmp2 = 0.00001*(z2 - z1) # <<<<<<<<<<<<<< * r = sqrt(tmp1**2 + tmp2**2 + dx**2) * else: */ __pyx_v_tmp2 = (0.00001 * (__pyx_v_z2 - __pyx_v_z1)); /* "fatiando/gravmag/_prism.pyx":435 * tmp1 = 0.00001*(y2 - y1) * tmp2 = 0.00001*(z2 - z1) * r = sqrt(tmp1**2 + tmp2**2 + dx**2) # <<<<<<<<<<<<<< * else: * r = sqrt(dx**2 + dy**2 + dz**2) */ __pyx_v_r = sqrt(((pow(__pyx_v_tmp1, 2.0) + pow(__pyx_v_tmp2, 2.0)) + pow(__pyx_v_dx, 2.0))); goto __pyx_L16; } /*else*/ { /* "fatiando/gravmag/_prism.pyx":437 * r = sqrt(tmp1**2 + tmp2**2 + dx**2) * else: * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelyz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); } __pyx_L16:; /* "fatiando/gravmag/_prism.pyx":438 * else: * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelyz(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelyz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":439 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelyz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_27 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":423 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":410 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, 
ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":443 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_25gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_24gzz[] = "gzz(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_25gzz = {__Pyx_NAMESTR("gzz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_25gzz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_24gzz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_25gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gzz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] 
= PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gzz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; 
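/*
 * The gzz wrapper repeats the gyy/gyz pattern: the same 11-argument
 * signature and keyword table, the same error labels, differing only
 * in the reported .pyx lines (443-448) and in dispatching to
 * __pyx_pf_8fatiando_7gravmag_6_prism_24gzz, whose body again begins
 * with the seven-buffer validation prologue.
 */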
} else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 447; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gzz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 448; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_24gzz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, 
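/* At this point all 11 arguments are unpacked: xp/yp/zp/res have been
   type-tested as numpy.ndarray and x1..z2/density converted to C doubles,
   so the wrapper can tail straight into the typed implementation. */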
__pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_24gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gzz", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, 
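/* Buffer setup: each ndarray argument is exported through the buffer
   protocol and validated against DTYPE_T with ndim=1; res additionally
   requires PyBUF_WRITABLE. The strides/shape cached in diminfo[0] are what
   allow the nogil loop below to index raw memory without touching Python
   objects. */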
PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":452 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":453 * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if 
(unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":454 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); 
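/* The verbose blocks here are simply the expansion of the Cython line
   `x = numpy.array([x2, x1], dtype=DTYPE)` and its y/z twins: look up
   numpy.array, box the two doubles, build the argument list and the kwargs
   dict, call, type-test the result as ndarray, and re-acquire a typed
   buffer on it. */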
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":455 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for 
l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = 
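/* What the upcoming nogil section computes, per observation point l
   (assembled from the _prism.pyx fragments quoted in the generated
   comments):

       res[l] += density * sum over i,j,k in {0,1} of
                 (-1)^(i+j+k) * kernelzz(dx_i, dy_j, dz_k, r)

   where dx_i = x[i]-xp[l], dy_j = y[j]-yp[l], dz_k = z[k]-zp[l] and
   r = sqrt(dx^2 + dy^2 + dz^2); i.e. the kernel is evaluated at the eight
   corners of the prism with alternating sign. */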
__pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":456 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":457 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_r) lastprivate(__pyx_v_kernel) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_k) lastprivate(__pyx_v_j) lastprivate(__pyx_v_dz) lastprivate(__pyx_v_i) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_dx) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_j = ((unsigned int)0xbad0bad0); __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_prism.pyx":459 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":460 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":461 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":462 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, 
__pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":463 * for j in range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":464 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelzz(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":465 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelzz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":466 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelzz(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density * */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelzz(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":467 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelzz(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":456 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":443 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._prism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_prism.pyx":471 * @cython.wraparound(False) * @cython.boundscheck(False) * def potential(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_27potential(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_6_prism_26potential[] = "potential(ndarray xp, ndarray yp, ndarray zp, double x1, double x2, double y1, double y2, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_6_prism_27potential = {__Pyx_NAMESTR("potential"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_6_prism_27potential, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_6_prism_26potential)}; static PyObject *__pyx_pw_8fatiando_7gravmag_6_prism_27potential(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_x1; double __pyx_v_x2; double __pyx_v_y1; double __pyx_v_y2; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("potential (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x1,&__pyx_n_s_x2,&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "potential") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = 
PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_x2 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_x2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_y1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_y2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_y2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 475; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("potential", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._prism.potential", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 472; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 473; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_6_prism_26potential(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x1, __pyx_v_x2, __pyx_v_y1, __pyx_v_y2, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_6_prism_26potential(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, 
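/* Implementation of potential(); its body is structurally identical to the
   gzz() implementation above, with kernelpot() substituted for kernelzz()
   in the innermost loop. */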
PyArrayObject *__pyx_v_zp, double __pyx_v_x1, double __pyx_v_x2, double __pyx_v_y1, double __pyx_v_y2, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_j; unsigned int __pyx_v_k; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; PyArrayObject *__pyx_v_z = 0; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_r; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dx; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dy; __pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T __pyx_v_dz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_z; __Pyx_Buffer __pyx_pybuffer_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; unsigned int __pyx_t_11; unsigned int __pyx_t_12; unsigned int __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; unsigned int __pyx_t_18; unsigned int __pyx_t_19; unsigned int __pyx_t_20; unsigned int __pyx_t_21; unsigned int __pyx_t_22; unsigned int __pyx_t_23; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("potential", 0); __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_z.pybuffer.buf = NULL; __pyx_pybuffer_z.refcount = 0; __pyx_pybuffernd_z.data = NULL; __pyx_pybuffernd_z.rcbuffer = &__pyx_pybuffer_z; __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = 
__pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_prism.pyx":480 * cdef numpy.ndarray[DTYPE_T, ndim=1] x, y, z * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) # <<<<<<<<<<<<<< * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 480; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_prism.pyx":481 * cdef DTYPE_T kernel, r, dx, dy, dz * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) # <<<<<<<<<<<<<< * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_x2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_x1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 
= 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_x = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":482 * size = len(xp) * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) # <<<<<<<<<<<<<< * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_y2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_y1); if (unlikely(!__pyx_t_4)) 
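/* As in gzz(), these blocks expand x/y/z = numpy.array([..2, ..1],
   dtype=DTYPE) for the integration limits of potential() (.pyx 481-483). */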
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_10, &__pyx_t_9, &__pyx_t_8); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_8); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_10, __pyx_t_9, __pyx_t_8); } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_y = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":483 * x = numpy.array([x2, x1], dtype=DTYPE) * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyFloat_FromDouble(__pyx_v_z2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_z1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_6 = ((PyArrayObject *)__pyx_t_2); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __pyx_t_7 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_7 < 0)) { PyErr_Fetch(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_z.rcbuffer->pybuffer, (PyObject*)__pyx_v_z, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_8); Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_8, __pyx_t_9, __pyx_t_10); } } __pyx_pybuffernd_z.diminfo[0].strides = __pyx_pybuffernd_z.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_z.diminfo[0].shape = __pyx_pybuffernd_z.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 483; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_6 = 0; __pyx_v_z = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":484 * y = 
numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_prism.pyx":485 * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # Evaluate the integration limits * for k in range(2): */ __pyx_t_11 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_13 = (__pyx_t_11 - 0) / 1; if (__pyx_t_13 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_23, __pyx_t_18, __pyx_t_20, __pyx_t_17, __pyx_t_16, __pyx_t_21, __pyx_t_14, __pyx_t_19, __pyx_t_22, __pyx_t_15) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_dz) lastprivate(__pyx_v_dy) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_i) lastprivate(__pyx_v_r) lastprivate(__pyx_v_kernel) lastprivate(__pyx_v_dx) lastprivate(__pyx_v_k) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_13; __pyx_t_12++){ { __pyx_v_l = 0 + 1 * __pyx_t_12; /* Initialize private variables to invalid values */ __pyx_v_dz = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dy = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_i = ((unsigned int)0xbad0bad0); __pyx_v_r = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_kernel = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_dx = ((__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_j = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_prism.pyx":487 * for l in prange(size): * # Evaluate the integration limits * for k in range(2): # <<<<<<<<<<<<<< * dz = z[k] - zp[l] * for j in range(2): */ for (__pyx_t_14 = 0; __pyx_t_14 < 2; __pyx_t_14+=1) { __pyx_v_k = __pyx_t_14; /* "fatiando/gravmag/_prism.pyx":488 * # Evaluate the integration limits * for k in range(2): * dz = z[k] - zp[l] # <<<<<<<<<<<<<< * for j in range(2): * dy = y[j] - yp[l] */ __pyx_t_15 = __pyx_v_k; __pyx_t_16 = __pyx_v_l; __pyx_v_dz = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_z.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_z.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":489 * for k in range(2): * dz = z[k] - zp[l] * for j in range(2): # <<<<<<<<<<<<<< * dy = y[j] - yp[l] * for i in range(2): */ for (__pyx_t_17 = 0; __pyx_t_17 < 2; __pyx_t_17+=1) { __pyx_v_j = __pyx_t_17; /* "fatiando/gravmag/_prism.pyx":490 * dz = z[k] - zp[l] * for j in range(2): * dy = y[j] - yp[l] # <<<<<<<<<<<<<< * for i in range(2): * dx = x[i] - xp[l] */ __pyx_t_18 = __pyx_v_j; __pyx_t_19 = __pyx_v_l; __pyx_v_dy = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":491 * for j in 
range(2): * dy = y[j] - yp[l] * for i in range(2): # <<<<<<<<<<<<<< * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) */ for (__pyx_t_20 = 0; __pyx_t_20 < 2; __pyx_t_20+=1) { __pyx_v_i = __pyx_t_20; /* "fatiando/gravmag/_prism.pyx":492 * dy = y[j] - yp[l] * for i in range(2): * dx = x[i] - xp[l] # <<<<<<<<<<<<<< * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelpot(dx, dy, dz, r) */ __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_l; __pyx_v_dx = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_prism.pyx":493 * for i in range(2): * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) # <<<<<<<<<<<<<< * kernel = kernelpot(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_r = sqrt(((pow(__pyx_v_dx, 2.0) + pow(__pyx_v_dy, 2.0)) + pow(__pyx_v_dz, 2.0))); /* "fatiando/gravmag/_prism.pyx":494 * dx = x[i] - xp[l] * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelpot(dx, dy, dz, r) # <<<<<<<<<<<<<< * res[l] += ((-1.)**(i + j + k))*kernel*density */ __pyx_v_kernel = __pyx_f_8fatiando_7gravmag_6_prism_kernelpot(__pyx_v_dx, __pyx_v_dy, __pyx_v_dz, __pyx_v_r); /* "fatiando/gravmag/_prism.pyx":495 * r = sqrt(dx**2 + dy**2 + dz**2) * kernel = kernelpot(dx, dy, dz, r) * res[l] += ((-1.)**(i + j + k))*kernel*density # <<<<<<<<<<<<<< */ __pyx_t_23 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_6_prism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_res.diminfo[0].strides) += ((pow(-1., ((double)((__pyx_v_i + __pyx_v_j) + __pyx_v_k))) * __pyx_v_kernel) * __pyx_v_density); } } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_prism.pyx":484 * y = numpy.array([y2, y1], dtype=DTYPE) * z = numpy.array([z2, z1], dtype=DTYPE) * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # Evaluate the integration limits */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_prism.pyx":471 * @cython.wraparound(False) * @cython.boundscheck(False) * def potential(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} 
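/* The nogil/prange block above is Cython's lowering of
 *     with nogil:
 *         for l in prange(size):
 * into an OpenMP parallel-for: the loop-carried scalars (dx, dy, dz, r,
 * kernel, i, j, k) are declared (last)private and reset to sentinel
 * values at the top of every iteration, and each iteration writes only
 * res[l] for its own l, so the eight-corner sum needs no OpenMP
 * reduction clause.  A minimal hand-written sketch of the same kernel
 * follows (compiled out via #if 0; "kernelpot" stands in for the
 * module's cdef kernel __pyx_f_8fatiando_7gravmag_6_prism_kernelpot). */
#if 0
static void potential_kernel_sketch(long size, double density,
                                    const double *x, const double *y,
                                    const double *z, const double *xp,
                                    const double *yp, const double *zp,
                                    double *res)
{
    #pragma omp parallel for
    for (long l = 0; l < size; l++)
        /* two integration limits per axis: 2 x 2 x 2 prism corners */
        for (int k = 0; k < 2; k++)
            for (int j = 0; j < 2; j++)
                for (int i = 0; i < 2; i++) {
                    double dx = x[i] - xp[l];
                    double dy = y[j] - yp[l];
                    double dz = z[k] - zp[l];
                    double r  = sqrt(dx*dx + dy*dy + dz*dz);
                    /* (-1)^(i+j+k) alternates the sign over the corners */
                    res[l] += pow(-1.0, (double)(i + j + k))
                              * kernelpot(dx, dy, dz, r) * density;
                }
}
#endif
/* On the error path above, every buffer view was released while the
 * pending exception was parked via __Pyx_ErrFetch/__Pyx_ErrRestore; a
 * traceback frame is recorded next. */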
__Pyx_AddTraceback("fatiando.gravmag._prism.potential", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_z.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_x); __Pyx_XDECREF((PyObject *)__pyx_v_y); __Pyx_XDECREF((PyObject *)__pyx_v_z); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* 
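 * (Note: the statement just above is the classic run-time endianness
 * probe: read the first byte of an int set to 1; nonzero means
 * little-endian.  The checks that follow validate the consumer's
 * PyBUF_C_CONTIGUOUS / PyBUF_F_CONTIGUOUS requests against the array's
 * actual NPY_*_CONTIGUOUS flags and raise ValueError on a mismatch.)
 *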
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); 
__pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_2 = (__pyx_v_copy_shape != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227 * # This is allocated as one block, strides first. 
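 * (One malloc covers both arrays: strides occupies the first ndim slots
 * and shape the next ndim; __releasebuffer__ later frees the whole block
 * through info.strides, which is why info.shape must never be freed on
 * its own.)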
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype 
descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0); if (__pyx_t_1) { __pyx_t_2 = (__pyx_v_little_endian != 0); } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_1) { __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if 
((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ switch (__pyx_v_t) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ case NPY_BYTE: __pyx_v_f = __pyx_k_b; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = __pyx_k_B; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = __pyx_k_h; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = __pyx_k_H; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = __pyx_k_i; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = __pyx_k_I; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = __pyx_k_l; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = __pyx_k_L; break; /* 
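 * (The switch below continues mapping descr.type_num onto the buffer
 * protocol's single-character struct-format codes; the two-character
 * codes "Zf", "Zd" and "Zg" denote complex float, complex double and
 * complex long double.)
 *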
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = __pyx_k_q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = __pyx_k_Q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = __pyx_k_f; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = __pyx_k_d; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = __pyx_k_g; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = __pyx_k_Zf; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = __pyx_k_Zd; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = __pyx_k_Zg; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = __pyx_k_O; break; default: /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} break; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. 
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* 
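 * __releasebuffer__ mirrors __getbuffer__: info.format is freed only for
 * structured dtypes (the only case in which __getbuffer__ malloc'ed it),
 * and info.strides only when sizeof(npy_intp) != sizeof(Py_ssize_t),
 * since strides and shape were then carved out of one malloc'ed block.
 *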
function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # 
<<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
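 * (PyArray_MultiIterNew1 .. PyArray_MultiIterNew5 are thin inline
 * wrappers, one per arity, over numpy's varargs PyArray_MultiIterNew;
 * each casts its operands to void* and funnels failures through the
 * usual __pyx_L1_error path.)
 *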
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if 
(likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' 
and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0); if (__pyx_t_6) { __pyx_t_7 = (__pyx_v_little_endian != 0); } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_6) { __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ 
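/* _util_dtypestring contract: descr is the structured dtype being
 * described, f/end delimit the output buffer, and offset tracks the
 * running byte position (the function returns the new write position, or
 * NULL on error, hence "except NULL" in the .pxd source).  For each
 * (child, new_offset) field pair, the loop above emitted 'x' pad bytes
 * until offset[0] reached the field's start; the statement below then
 * advances offset[0] past the child's itemsize before the child's format
 * code is appended.  offset[0] is boxed with __Pyx_PyInt_From_int for
 * the comparison because new_offset is a Python object at the .pxd
 * level. */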
__pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if 
(unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); 
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * 
elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L11:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = 
(__pyx_v_f + 1); goto __pyx_L9; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_11; } __pyx_L9:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif __Pyx_NAMESTR("_prism"), __Pyx_DOCSTR(__pyx_k_Cython_implementation_of_the_gr), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 
0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, {&__pyx_n_s_bx, __pyx_k_bx, sizeof(__pyx_k_bx), 0, 0, 1, 1}, {&__pyx_n_s_by, __pyx_k_by, sizeof(__pyx_k_by), 0, 0, 1, 1}, {&__pyx_n_s_bz, __pyx_k_bz, sizeof(__pyx_k_bz), 0, 0, 1, 1}, {&__pyx_n_s_density, __pyx_k_density, sizeof(__pyx_k_density), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1}, {&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 1, 1}, {&__pyx_n_s_dz, __pyx_k_dz, sizeof(__pyx_k_dz), 0, 0, 1, 1}, {&__pyx_n_s_fatiando_gravmag__prism, __pyx_k_fatiando_gravmag__prism, sizeof(__pyx_k_fatiando_gravmag__prism), 0, 0, 1, 1}, {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, {&__pyx_n_s_fx, __pyx_k_fx, sizeof(__pyx_k_fx), 0, 0, 1, 1}, {&__pyx_n_s_fy, __pyx_k_fy, sizeof(__pyx_k_fy), 0, 0, 1, 1}, {&__pyx_n_s_fz, __pyx_k_fz, sizeof(__pyx_k_fz), 0, 0, 1, 1}, {&__pyx_n_s_gx, __pyx_k_gx, sizeof(__pyx_k_gx), 0, 0, 1, 1}, {&__pyx_n_s_gxx, __pyx_k_gxx, sizeof(__pyx_k_gxx), 0, 0, 1, 1}, {&__pyx_n_s_gxy, __pyx_k_gxy, sizeof(__pyx_k_gxy), 0, 0, 1, 1}, {&__pyx_n_s_gxz, __pyx_k_gxz, sizeof(__pyx_k_gxz), 0, 0, 1, 1}, {&__pyx_n_s_gy, __pyx_k_gy, sizeof(__pyx_k_gy), 0, 0, 1, 1}, {&__pyx_n_s_gyy, __pyx_k_gyy, sizeof(__pyx_k_gyy), 0, 0, 1, 1}, {&__pyx_n_s_gyz, __pyx_k_gyz, sizeof(__pyx_k_gyz), 0, 0, 1, 1}, {&__pyx_n_s_gz, __pyx_k_gz, sizeof(__pyx_k_gz), 0, 0, 1, 1}, {&__pyx_n_s_gzz, __pyx_k_gzz, sizeof(__pyx_k_gzz), 0, 0, 1, 1}, {&__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_k_home_leo_src_fatiando_fatiando, sizeof(__pyx_k_home_leo_src_fatiando_fatiando), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_kernel, __pyx_k_kernel, sizeof(__pyx_k_kernel), 0, 0, 1, 1}, {&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mx, __pyx_k_mx, sizeof(__pyx_k_mx), 0, 0, 1, 1}, {&__pyx_n_s_my, __pyx_k_my, sizeof(__pyx_k_my), 0, 0, 1, 1}, {&__pyx_n_s_mz, __pyx_k_mz, sizeof(__pyx_k_mz), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_potential, __pyx_k_potential, sizeof(__pyx_k_potential), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1}, {&__pyx_n_s_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_test, 
__pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_tf, __pyx_k_tf, sizeof(__pyx_k_tf), 0, 0, 1, 1}, {&__pyx_n_s_tmp1, __pyx_k_tmp1, sizeof(__pyx_k_tmp1), 0, 0, 1, 1}, {&__pyx_n_s_tmp2, __pyx_k_tmp2, sizeof(__pyx_k_tmp2), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_v1, __pyx_k_v1, sizeof(__pyx_k_v1), 0, 0, 1, 1}, {&__pyx_n_s_v2, __pyx_k_v2, sizeof(__pyx_k_v2), 0, 0, 1, 1}, {&__pyx_n_s_v3, __pyx_k_v3, sizeof(__pyx_k_v3), 0, 0, 1, 1}, {&__pyx_n_s_v4, __pyx_k_v4, sizeof(__pyx_k_v4), 0, 0, 1, 1}, {&__pyx_n_s_v5, __pyx_k_v5, sizeof(__pyx_k_v5), 0, 0, 1, 1}, {&__pyx_n_s_v6, __pyx_k_v6, sizeof(__pyx_k_v6), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_x1, __pyx_k_x1, sizeof(__pyx_k_x1), 0, 0, 1, 1}, {&__pyx_n_s_x2, __pyx_k_x2, sizeof(__pyx_k_x2), 0, 0, 1, 1}, {&__pyx_n_s_xp, __pyx_k_xp, sizeof(__pyx_k_xp), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_y1, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 0, 1, 1}, {&__pyx_n_s_y2, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1}, {&__pyx_n_s_yp, __pyx_k_yp, sizeof(__pyx_k_yp), 0, 0, 1, 1}, {&__pyx_n_s_z, __pyx_k_z, sizeof(__pyx_k_z), 0, 0, 1, 1}, {&__pyx_n_s_z1, __pyx_k_z1, sizeof(__pyx_k_z1), 0, 0, 1, 1}, {&__pyx_n_s_z2, __pyx_k_z2, sizeof(__pyx_k_z2), 0, 0, 1, 1}, {&__pyx_n_s_zp, __pyx_k_zp, sizeof(__pyx_k_zp), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "fatiando/gravmag/_prism.pyx":74 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__7 = PyTuple_Pack(38, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_fx, __pyx_n_s_fy, __pyx_n_s_fz, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_bx, __pyx_n_s_by, __pyx_n_s_bz, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(16, 0, 38, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, 
__pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_tf, 74, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":111 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__9 = PyTuple_Pack(29, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(13, 0, 29, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bx, 111, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":142 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__11 = PyTuple_Pack(29, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_v2, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(13, 0, 29, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_by, 142, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":173 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__13 = PyTuple_Pack(29, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_v3, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__13)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(13, 0, 29, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bz, 173, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":204 * @cython.wraparound(False) * @cython.boundscheck(False) * def gx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__15 = PyTuple_Pack(24, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(11, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gx, 204, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":232 * @cython.wraparound(False) * @cython.boundscheck(False) * def gy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__17 = PyTuple_Pack(24, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(11, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gy, 232, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":260 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__19 = PyTuple_Pack(24, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, 
__pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(11, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gz, 260, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":288 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__21 = PyTuple_Pack(24, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(11, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxx, 288, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":316 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__23 = PyTuple_Pack(26, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz, __pyx_n_s_tmp1, __pyx_n_s_tmp2); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(11, 0, 26, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxy, 316, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":349 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__25 = PyTuple_Pack(26, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, 
__pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz, __pyx_n_s_tmp1, __pyx_n_s_tmp2); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(11, 0, 26, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxz, 349, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":382 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__27 = PyTuple_Pack(24, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(11, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyy, 382, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":410 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__29 = PyTuple_Pack(26, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz, __pyx_n_s_tmp1, __pyx_n_s_tmp2); if (unlikely(!__pyx_tuple__29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(11, 0, 26, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyz, 410, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":443 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp 
not None, */ __pyx_tuple__31 = PyTuple_Pack(24, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(11, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gzz, 443, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_prism.pyx":471 * @cython.wraparound(False) * @cython.boundscheck(False) * def potential(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__33 = PyTuple_Pack(24, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_kernel, __pyx_n_s_r, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_dz); if (unlikely(!__pyx_tuple__33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__33); __Pyx_GIVEREF(__pyx_tuple__33); __pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(11, 0, 24, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_potential, 471, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_prism(void); /*proto*/ PyMODINIT_FUNC init_prism(void) #else PyMODINIT_FUNC PyInit__prism(void); /*proto*/ PyMODINIT_FUNC PyInit__prism(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__prism(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
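/* Orientation note (editorial, not generated): from here the init function
 * follows the standard Cython boot sequence -- create the empty-tuple and
 * empty-bytes singletons, create the module object (Py_InitModule4 on
 * Python 2, PyModule_Create on Python 3), run the cached-builtin and
 * cached-constant initializers defined above, import the numpy extension
 * types, then execute the module body: import numpy, bind DTYPE, and
 * register one PyCFunction per kernel (tf, bx, by, bz, gx, gy, gz, gxx,
 * gxy, gxz, gyy, gyz, gzz, potential). A condensed sketch of the
 * version-split module creation, with placeholder names (module, methods,
 * doc, error), assuming the shapes used in this file: */
#if 0   /* sketch only, never compiled */
PyObject *module;
#if PY_MAJOR_VERSION < 3
module = Py_InitModule4("_prism", methods, doc, 0, PYTHON_API_VERSION);
#else
module = PyModule_Create(&moduledef);
#endif
if (!module) goto error;  /* every init step bails through one error label */
#endif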
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_prism"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_Cython_implementation_of_the_gr), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_fatiando__gravmag___prism) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "fatiando.gravmag._prism")) { if (unlikely(PyDict_SetItemString(modules, "fatiando.gravmag._prism", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "fatiando/gravmag/_prism.pyx":6 * prisms. 
* """ * import numpy # <<<<<<<<<<<<<< * * from libc.math cimport log, atan2, sqrt */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "fatiando/gravmag/_prism.pyx":15 * from cython.parallel cimport prange, parallel * * DTYPE = numpy.float # <<<<<<<<<<<<<< * ctypedef numpy.float_t DTYPE_T * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":74 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_1tf, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_tf, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":111 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_3bx, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":142 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_5by, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_by, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":173 * @cython.wraparound(False) * @cython.boundscheck(False) * 
def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_7bz, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":204 * @cython.wraparound(False) * @cython.boundscheck(False) * def gx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_9gx, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 204; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":232 * @cython.wraparound(False) * @cython.boundscheck(False) * def gy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_11gy, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":260 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_13gz, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":288 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_15gxx, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":316 * 
@cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_17gxy, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":349 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_19gxz, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":382 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_21gyy, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 382; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":410 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_23gyz, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 410; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":443 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_25gzz, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gzz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
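  /* Each public kernel in this module (tf, bx..bz, gx..gz, gxx..gzz, and
     potential just below) is exported with the same two-step pattern:
     PyCFunction_NewEx wraps the generated C entry point from its
     __pyx_mdef_* PyMethodDef, and PyDict_SetItem publishes the wrapper in
     the module dict __pyx_d under its interned name, which is what makes
     `from fatiando.gravmag._prism import gzz` resolve. A hand-written
     sketch of one such registration (impl, module_name_obj and d are
     placeholder names here, not symbols from this file):

         static PyMethodDef md =
             {"gzz", (PyCFunction)impl, METH_VARARGS|METH_KEYWORDS, 0};
         PyObject *f = PyCFunction_NewEx(&md, NULL, module_name_obj);
         if (!f) goto error;
         if (PyDict_SetItemString(d, "gzz", f) < 0) { Py_DECREF(f); goto error; }
         Py_DECREF(f);
  */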
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":471 * @cython.wraparound(False) * @cython.boundscheck(False) * def potential(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_6_prism_27potential, NULL, __pyx_n_s_fatiando_gravmag__prism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_potential, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 471; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_prism.pyx":1 * #cython: embedsignature=True # <<<<<<<<<<<<<< * """ * Cython implementation of the gravity and magnetic fields of right rectangular */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init fatiando.gravmag._prism", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init fatiando.gravmag._prism"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; /* not a 'break' in the loop */ } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { 
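        /* '\0': the PEP 3118 format string has ended. Arriving here with a
           type chunk still pending (enc_type != 0) but no dtype field left
           to match it against (head == NULL) means the buffer describes
           more data than the declared dtype, so the mismatch is raised
           immediately; the code after this block flushes the last pending
           chunk and then fails the opposite way if the dtype still has
           unconsumed fields. */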
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context 
ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (result) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; #endif result = (*call)(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 Py_LeaveRecursiveCall(); #endif if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static void __Pyx_RaiseBufferFallbackError(void) { PyErr_SetString(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto 
raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void 
__Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *getbuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer); if (getbuffer_cobj) { getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } } #endif PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *releasebuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer); if (releasebuffer_cobj) { releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, 
empty_dict, list, level); #endif } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \ { \ func_type value = func(x); \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ PyErr_SetString(PyExc_OverflowError, \ (is_unsigned && unlikely(value < zero)) ? \ "can't convert negative value to " #target_type : \ "value too large to convert to " #target_type); \ return (target_type) -1; \ } \ } \ return (target_type) value; \ } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { const unsigned int neg_one = (unsigned int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } return (unsigned int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(unsigned int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(unsigned int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyLong_AsLong) } else if (sizeof(unsigned int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int) -1; } } else { 
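        /* Fallback for objects that are neither PyInt nor PyLong (e.g. a
           numpy integer scalar, or anything else implementing __int__):
           coerce with __Pyx_PyNumber_Int -- essentially int(x) -- and rerun
           the conversion on the resulting Python integer. */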
unsigned int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned int) -1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE 
__pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } 
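    /* All sizeof() comparisons in this function are compile-time constants,
       so the compiler keeps exactly one branch per build: the cheapest
       CPython constructor whose argument type holds an int without
       truncation (PyInt_FromLong on every common ABI, where
       sizeof(int) <= sizeof(long)). The _PyLong_FromByteArray fallback
       below only matters on exotic ABIs where int is wider than long long. */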
else if (sizeof(int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong) } else if (sizeof(int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = 
(int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(long) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(long) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong) } else if (sizeof(long) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = 
__Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && 
unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/ *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else /* PY_VERSION_HEX < 0x03030000 */ if (PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_DATA_SIZE(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ return PyUnicode_AsUTF8AndSize(o, length); #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ #endif /* PY_VERSION_HEX < 0x03030000 */ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ #if !CYTHON_COMPILING_IN_PYPY #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if 
PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) return PyInt_AS_LONG(b); #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS switch (Py_SIZE(b)) { case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0]; case 0: return 0; case 1: return ((PyLongObject*)b)->ob_digit[0]; } #endif #endif #if PY_VERSION_HEX < 0x02060000 return PyInt_AsSsize_t(b); #else return PyLong_AsSsize_t(b); #endif } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } #endif /* Py_PYTHON_H */
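The integer-conversion helpers above (__Pyx_PyInt_From_int, __Pyx_PyInt_From_long) fall back to _PyLong_FromByteArray after probing the machine's byte order at run time. A minimal standalone sketch of that probe, assuming nothing beyond standard C (the main function is illustrative only, not part of the generated module):

/* Run-time endianness probe, as used by the conversion helpers above:
 * the first byte of an int set to 1 is 1 on little-endian machines. */
#include <stdio.h>

int main(void)
{
    int one = 1;
    int little = (int)*(unsigned char *)&one;
    printf("little-endian: %s\n", little ? "yes" : "no");
    return 0;
}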
kvstore_dist_server.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file mxnet_node.h * \brief implement mxnet nodes */ #ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #include <mxnet/c_api.h> #include <mxnet/kvstore.h> #include <ps/ps.h> #include <queue> #include <string> #include <mutex> #include <condition_variable> #include <memory> #include <functional> #include <future> #include <vector> #include "../profiler/profiler.h" #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/init_op.h" namespace mxnet { namespace kvstore { // maintain same order in frontend. enum class CommandType { kController, kSetMultiPrecision, kStopServer, kSyncMode, kSetGradientCompression, kSetProfilerParams }; enum class RequestType { kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull }; struct DataHandleType { RequestType requestType; int dtype; }; /*! * Uses Cantor pairing function to generate a unique number given two numbers. * This number can also be inverted to find the unique pair whose Cantor value is this number. * Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function * \param requestType RequestType * \param dtype integer * \return Cantor value of arguments */ static int GetCommandType(RequestType requestType, int d) { int m = static_cast<int>(requestType); return (((m + d) * (m + d + 1)) / 2) + d; } /*! * Unpairs Cantor value and finds the two integers used to pair. * Then returns DataHandleType object with those numbers. * \param cmd DataHandleCommand generated by GetCommandType function * \return DataHandleType */ static DataHandleType DepairDataHandleType(int cmd) { int w = std::floor((std::sqrt(8 * cmd + 1) - 1) / 2); int t = ((w * w) + w) / 2; int y = cmd - t; int x = w - y; CHECK_GE(x, 0); CHECK_GE(y, 0); DataHandleType type; type.requestType = static_cast<RequestType>(x); type.dtype = y; return type; } /** * \brief executor runs a function using the thread called \ref Start */ class Executor { public: /** * \brief start the executor */ void Start() { std::unique_lock<std::mutex> lk(mu_); while (true) { cond_.wait(lk, [this] { return !queue_.empty(); }); Block blk = std::move(queue_.front()); queue_.pop(); lk.unlock(); if (blk.f) { blk.f(); blk.p->set_value(); } else { blk.p->set_value(); break; } lk.lock(); } } /** * \brief function */ typedef std::function<void()> Func; /** * \brief let the thread called \ref Start to exec a function. 
threadsafe */ void Exec(const Func& func) { Block blk(func); auto fut = blk.p->get_future(); { std::lock_guard<std::mutex> lk(mu_); queue_.push(std::move(blk)); cond_.notify_one(); } fut.wait(); } /** * \brief stop the thread, threadsafe */ void Stop() { Exec(Func()); } private: struct Block { explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) {} Func f; std::shared_ptr<std::promise<void>> p; }; std::queue<Block> queue_; std::mutex mu_; std::condition_variable cond_; }; class KVStoreDistServer { public: KVStoreDistServer() { using namespace std::placeholders; ps_server_ = new ps::KVServer<char>(0); static_cast<ps::SimpleApp*>(ps_server_) ->set_request_handle(std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2)); ps_server_->set_request_handle(std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3)); sync_mode_ = false; gradient_compression_ = std::make_shared<GradientCompression>(); log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false); } ~KVStoreDistServer() { profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0)); delete ps_server_; } void set_controller(const KVStore::Controller& controller) { CHECK(controller); controller_ = controller; } void set_updater(const KVStore::Updater& updater) { CHECK(updater); updater_ = updater; } /** * \brief blocked until received the command \a kSyncMode */ void Run() { exec_.Start(); } private: struct UpdateBuf { std::vector<ps::KVMeta> request; NDArray merged; // temp_array is used to cast received values as float32 for computation if required NDArray temp_array; }; void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) { CommandType recved_type = static_cast<CommandType>(recved.head); switch (recved_type) { case CommandType::kStopServer: exec_.Stop(); break; case CommandType::kSyncMode: sync_mode_ = true; break; case CommandType::kSetGradientCompression: gradient_compression_->DecodeParams(recved.body); break; case CommandType::kSetProfilerParams: // last char is the type of profiler command ProcessServerProfilerCommands( static_cast<KVStoreServerProfilerCommand>(recved.body.back() - '0'), recved.body); break; case CommandType::kSetMultiPrecision: // uses value 1 for message id from frontend if (!multi_precision_) { multi_precision_ = true; CreateMultiPrecisionCopies(); } break; case CommandType::kController: // this uses value 0 for message id from frontend // let the main thread to execute ctrl, which is necessary for python exec_.Exec([this, recved]() { CHECK(controller_); controller_(recved.head, recved.body); }); break; } app->Response(recved); } /* * For keys already initialized, if necessary create stored_realt. * This will only be used if by some wrong usage of kvstore, * some keys are initialized before optimizer is set. 
*/ void CreateMultiPrecisionCopies() { for (auto const& stored_entry : store_) { const int key = stored_entry.first; const NDArray& stored = stored_entry.second; if (stored.dtype() != mshadow::kFloat32) { auto& stored_realt = store_realt_[key]; if (stored.storage_type() == kRowSparseStorage) { stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(), true, mshadow::kFloat32); } else { stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32); } auto& update = update_buf_[key]; if (!update.merged.is_none()) { if (update.merged.storage_type() == kRowSparseStorage) { update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(), true, mshadow::kFloat32); } else { update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false, mshadow::kFloat32); } } CHECK(update.request.size() == 0) << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway." << "Please set optimizer before pushing keys." << key << " " << update.request.size(); CopyFromTo(stored, stored_realt); } } for (auto const& stored_realt_entry : store_realt_) { stored_realt_entry.second.WaitToRead(); } } void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) { switch (type) { case KVStoreServerProfilerCommand::kSetConfig: SetProfilerConfig(body.substr(0, body.size() - 1)); break; case KVStoreServerProfilerCommand::kState: MXSetProfilerState(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kPause: MXProfilePause(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kDump: MXDumpProfile(static_cast<int>(body.front() - '0')); break; } } void SetProfilerConfig(std::string params_str) { std::vector<std::string> elems; mxnet::kvstore::split(params_str, ',', std::back_inserter(elems)); std::vector<const char*> ckeys; std::vector<const char*> cvals; ckeys.reserve(elems.size()); cvals.reserve(elems.size()); for (size_t i = 0; i < elems.size(); i++) { std::vector<std::string> parts; mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts)); CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker"; CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty"; CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter " << parts[0]; if (parts[0] == "filename") { parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1]; } char* ckey = new char[parts[0].length() + 1]; std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str()); ckeys.push_back(ckey); char* cval = new char[parts[1].length() + 1]; std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str()); cvals.push_back(cval); } MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]); for (size_t i = 0; i < ckeys.size(); i++) { delete[] ckeys[i]; delete[] cvals[i]; } } void DataHandleEx(const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { DataHandleType type = DepairDataHandleType(req_meta.cmd); switch (type.requestType) { case RequestType::kRowSparsePushPull: DataHandleRowSparse(type, req_meta, req_data, server); break; case RequestType::kCompressedPushPull: DataHandleCompressed(type, req_meta, req_data, server); break; case RequestType::kDefaultPushPull: DataHandleDefault(type, req_meta, req_data, server); break; } } inline bool has_multi_precision_copy(const DataHandleType type) { return multi_precision_ && type.dtype != mshadow::kFloat32; } inline void ApplyUpdates(const DataHandleType type, const int 
key, const ps::KVPairs<char>& req_data, UpdateBuf* update_buf, ps::KVServer<char>* server) { if (!sync_mode_ || update_buf->request.size() == (size_t)ps::NumWorkers()) { // let the main thread to execute updater_, which is necessary for python auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array; if (updater_) { exec_.Exec([this, key, &update, &stored]() { CHECK(updater_); updater_(key, update, &stored); }); } else { CHECK(sync_mode_) << "Updater needs to be set for async mode"; // if no updater, just copy CopyFromTo(update_buf->merged, &stored); } if (log_verbose_) { LOG(INFO) << "sent response to " << update_buf->request.size() << " workers"; } /** * Request can be for either push, pull or pushpull * If pull flag is set, respond immediately with the updated values * Otherwise, only send the notification */ bool has_pull = false; for (const auto& req : update_buf->request) { has_pull = has_pull || req.pull; } if (has_pull) { // if there is a pull request, perform WaitToRead() once before DefaultStorageResponse if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]); stored.WaitToRead(); for (const auto& req : update_buf->request) { if (req.pull) { DefaultStorageResponse(type, key, req, req_data, server); } } update_buf->request.clear(); } else { // otherwise, send response directly for (const auto& req : update_buf->request) { server->Response(req); } update_buf->request.clear(); if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]); stored.WaitToRead(); } } else { update_buf->merged.WaitToRead(); } } void DecodeRowIds(const ps::SArray<ps::Key>& keys, int64_t* indices, const int64_t master_key, const int64_t num_rows) { indices[0] = 0; for (int64_t i = 1; i <= num_rows; i++) { int key = DecodeKey(keys[i]); auto row_id = key - master_key; indices[i - 1] = row_id; } } void AccumulateRowSparseGrads(const DataHandleType type, const NDArray& recved, UpdateBuf* updateBuf) { NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array); const NDArray& to_merge = has_multi_precision_copy(type) ? 
updateBuf->temp_array : recved; // accumulate row_sparse gradients using namespace mshadow; Engine::Get()->PushAsync( [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) { op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>( {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out}); on_complete(); }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &(updateBuf->merged), 0); updateBuf->merged.WaitToRead(); } void RowSparsePullResponse(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<char> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } const NDArray& stored = store_[master_key]; if (has_multi_precision_copy(type)) stored.WaitToRead(); CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const int num_bytes = mshadow::mshadow_sizeof(type.dtype); const int unit_size = unit_len * num_bytes; const char* data = static_cast<char*>(stored.data().dptr_); auto len = num_rows * unit_size; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_size; auto begin = (i - 1) * unit_size; auto end = i * unit_size; response.vals.segment(begin, end).CopyFrom(src, unit_size); } // setup response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); } void InitRowSparseStored(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key]; int dtype = type.dtype; int num_bytes = mshadow::mshadow_sizeof(dtype); auto unit_len = req_data.lens[1] / num_bytes; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t)unit_len}; mxnet::TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context(), true, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) { store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype); } Engine::Get()->PushAsync( [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) { NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu>* s = ctx.get_stream<cpu>(); using namespace mxnet::op; nnvm::dim_t nnr = rsp.shape()[0]; MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, { IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>(); mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx); }); TBlob rsp_data = rsp.data(); // copies or casts as appropriate ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext()); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); if (has_multi_precision_copy(type)) { CopyFromTo(stored, store_[master_key]); store_[master_key].WaitToRead(); } stored.WaitToRead(); server->Response(req_meta); } void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { int master_key = DecodeKey(req_data.keys[0]); auto num_rows = req_data.keys.size() - 1; auto& stored = store_[master_key]; if (req_meta.push) { CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty"; CHECK_EQ(req_data.lens[0], 0); if (stored.is_none()) { if (log_verbose_) LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server); return; } else { if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys; auto& updates = update_buf_[master_key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false, mshadow::kFloat32); } if (num_rows == 0) { if (sync_mode_) { if (updates.request.empty()) { // reset to zeros int merged_dtype = has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype; updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, merged_dtype); } // else nothing to aggregate updates.request.push_back(req_meta); ApplyUpdates(type, master_key, req_data, &updates, server); } else { server->Response(req_meta); } } else { auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype); CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t)num_rows, (size_t)unit_len}; mxnet::TShape dshape(ds, ds + 2); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) // row_sparse NDArray NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); AccumulateRowSparseGrads(type, recved, &updates); } updates.request.push_back(req_meta); ApplyUpdates(type, master_key, req_data, &updates, server); } } } else { // pull RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server); } } void DefaultStorageResponse(const DataHandleType type, const int key, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { ps::KVPairs<char> response; const NDArray& stored = store_[key]; CHECK(!stored.is_none()) << "init " << key << " first"; // as server returns when store_realt is ready in this case if (has_multi_precision_copy(type)) stored.WaitToRead(); auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype()); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len); server->Response(req_meta, response); } void DataHandleCompressed(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { CHECK_EQ(type.dtype, mshadow::kFloat32) << "Gradient compression is currently supported for fp32 only"; if (req_meta.push) { // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. 
so we need to make sure // the operators with \a NDArray are actually finished // first for dummy key which represents original size of array, whose len is 0 CHECK_EQ(req_data.keys.size(), (size_t)2); CHECK_EQ(req_data.lens.size(), (size_t)2); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]); int original_size = DecodeKey(req_data.keys[0]); int key = DecodeKey(req_data.keys[1]); auto& stored = store_[key]; size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)}; mxnet::TShape dshape(ds, ds + 1); TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); NDArray decomp_buf = decomp_buf_[key]; dshape = mxnet::TShape{(int64_t)original_size}; if (decomp_buf.is_none()) { decomp_buf = NDArray(dshape, Context()); } if (stored.is_none()) { stored = NDArray(dshape, Context()); gradient_compression_->Dequantize(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = update_buf_[key]; if (merged.merged.is_none()) { merged.merged = NDArray(dshape, Context()); } if (merged.request.size() == 0) { gradient_compression_->Dequantize(recved, &merged.merged, 0); } else { gradient_compression_->Dequantize(recved, &decomp_buf, 0); merged.merged += decomp_buf; } merged.request.push_back(req_meta); ApplyUpdates(type, key, req_data, &merged, server); } else { // async push gradient_compression_->Dequantize(recved, &decomp_buf, 0); exec_.Exec([this, key, &decomp_buf, &stored]() { CHECK(updater_); updater_(key, decomp_buf, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull CHECK_EQ(req_data.keys.size(), (size_t)1); CHECK_EQ(req_data.lens.size(), (size_t)0); int key = DecodeKey(req_data.keys[0]); DefaultStorageResponse(type, key, req_meta, req_data, server); } } void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { size_t ds[] = {(size_t)req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)}; mxnet::TShape dshape(ds, ds + 1); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); CopyFromTo(recved, &stored, 0); server->Response(req_meta); if (has_multi_precision_copy(type)) { auto& stored_dtype = store_[key]; stored_dtype = NDArray(dshape, Context(), false, type.dtype); CopyFromTo(stored, stored_dtype); stored_dtype.WaitToRead(); } stored.WaitToRead(); } else { auto& updates = update_buf_[key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32); } if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); updates.merged += updates.temp_array; } else { updates.merged += recved; } } updates.request.push_back(req_meta); ApplyUpdates(type, key, req_data, &updates, server); } } else { DefaultStorageResponse(type, key, req_meta, req_data, server); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } /** * \brief user defined mode for push */ bool sync_mode_; KVStore::Controller controller_; KVStore::Updater updater_; /** * \brief store_ contains the value at kvstore for each key */ std::unordered_map<int, NDArray> store_; std::unordered_map<int, NDArray> store_realt_; /** * \brief merge_buf_ is a buffer used if sync_mode is true. It represents * values from different workers being merged. The store will be updated * to this value when values from all workers are pushed into this buffer. */ std::unordered_map<int, UpdateBuf> update_buf_; /** * \brief decomp_buf_ is a buffer into which compressed values are * decompressed before merging to the store. used when compress_!='none' */ std::unordered_map<int, NDArray> decomp_buf_; Executor exec_; ps::KVServer<char>* ps_server_; // whether to LOG verbose information bool log_verbose_; /* * \brief whether to use multi precision mode. * in multi precision mode, all weights are stored as float32. * any gradient received will be cast to float32 before accumulation and updating of weights. */ bool multi_precision_; /** * \brief gradient compression object. * starts with none, used after SetGradientCompression sets the type * currently there is no support for unsetting gradient compression */ std::shared_ptr<kvstore::GradientCompression> gradient_compression_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
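GetCommandType and DepairDataHandleType above depend on the Cantor pairing z = ((x + y)(x + y + 1))/2 + y being invertible. A minimal standalone round-trip check, assuming plain C with the request type reduced to a bare int (pair_cantor and unpair_cantor are illustrative names, not MXNet APIs):

#include <assert.h>
#include <math.h>

/* z = (x + y)(x + y + 1)/2 + y, mirroring GetCommandType */
static int pair_cantor(int x, int y)
{
    return ((x + y) * (x + y + 1)) / 2 + y;
}

/* inverse, mirroring DepairDataHandleType */
static void unpair_cantor(int z, int *x, int *y)
{
    int w = (int)floor((sqrt(8.0 * z + 1.0) - 1.0) / 2.0);
    int t = (w * w + w) / 2;
    *y = z - t;
    *x = w - *y;
}

int main(void)
{
    for (int x = 0; x < 3; x++)          /* three RequestType values */
        for (int y = 0; y < 12; y++) {   /* a sample range of dtype codes */
            int rx, ry;
            unpair_cantor(pair_cantor(x, y), &rx, &ry);
            assert(rx == x && ry == y);
        }
    return 0;
}

Compile with -lm; the asserts pass for any non-negative x and y small enough that z fits in an int.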
blake2sp.c
/* BLAKE2 reference source code package - optimized C implementations Written in 2012 by Samuel Neves <sneves@dei.uc.pt> To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain worldwide. This software is distributed without any warranty. You should have received a copy of the CC0 Public Domain Dedication along with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 8 static inline int blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; store48( P->node_offset, offset ); P->node_depth = 0; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2s_init_param( S, P ); } static inline int blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; store48( P->node_offset, 0ULL ); P->node_depth = 1; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2s_init_param( S, P ); } int blake2sp_init( blake2sp_state *S, const uint8_t outlen ) { if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2sp_init_root( S->R, outlen, 0 ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; return 0; } int blake2sp_init_key( blake2sp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen ) { if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2sp_init_root( S->R, outlen, keylen ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2sp_update( blake2sp_state *S, const uint8_t *in, uint64_t inlen ) { size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2S_BLOCKBYTES; while( inlen__ >= 
PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S->S[id__], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = left + inlen; return 0; } int blake2sp_final( blake2sp_state *S, uint8_t *out, const uint8_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2S_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES; if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES; blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left ); } blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES ); } for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( S->R, out, outlen ); } int blake2sp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; blake2s_state S[PARALLELISM_DEGREE][1]; blake2s_state FS[1]; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if ( NULL == key && keylen > 0) return -1; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( keylen > BLAKE2S_KEYBYTES ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node if( keylen > 0 ) { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } if( inlen__ > id__ * BLAKE2S_BLOCKBYTES ) { const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES; const size_t len = left <= BLAKE2S_BLOCKBYTES ? 
left : BLAKE2S_BLOCKBYTES; blake2s_update( S[id__], in__, len ); } blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES ); } if( blake2sp_init_root( FS, outlen, keylen ) < 0 ) return -1; FS->last_node = 1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( FS, out, outlen ); } #if defined(BLAKE2SP_SELFTEST) #include <string.h> #include "blake2-kat.h" int main( int argc, char **argv ) { uint8_t key[BLAKE2S_KEYBYTES]; uint8_t buf[KAT_LENGTH]; for( size_t i = 0; i < BLAKE2S_KEYBYTES; ++i ) key[i] = ( uint8_t )i; for( size_t i = 0; i < KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i; for( size_t i = 0; i < KAT_LENGTH; ++i ) { uint8_t hash[BLAKE2S_OUTBYTES]; blake2sp( hash, buf, key, BLAKE2S_OUTBYTES, i, BLAKE2S_KEYBYTES ); if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) ) { puts( "error" ); return -1; } } puts( "ok" ); return 0; } #endif
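blake2sp_update and the one-shot blake2sp above stripe the input round-robin across the eight leaves: leaf id starts at byte offset id * BLAKE2S_BLOCKBYTES and advances by PARALLELISM_DEGREE blocks per step, so full block i is always consumed by leaf i % 8. A small standalone illustration of that assignment (constants copied from the file; the output is purely descriptive):

#include <stdio.h>

#define PARALLELISM_DEGREE 8
#define BLAKE2S_BLOCKBYTES 64

int main(void)
{
    /* Show which leaf consumes each of the first 16 full input blocks. */
    for (int block = 0; block < 16; block++)
        printf("bytes %4d..%4d -> leaf %d\n",
               block * BLAKE2S_BLOCKBYTES,
               (block + 1) * BLAKE2S_BLOCKBYTES - 1,
               block % PARALLELISM_DEGREE);
    return 0;
}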
GB_binop__eq_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_01__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_03__eq_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int16) // A*D function (colscale): GB (_AxD__eq_int16) // D*A function (rowscale): GB (_DxB__eq_int16) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int16) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int16) // C=scalar+B GB (_bind1st__eq_int16) // C=scalar+B' GB (_bind1st_tran__eq_int16) // C=A+scalar GB (_bind2nd__eq_int16) // C=A'+scalar GB (_bind2nd_tran__eq_int16) // C type: bool // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT16 || GxB_NO_EQ_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif 
} //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
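/* Editor's note -- a hedged usage sketch, not part of the generated file
 * above. The GB (_AaddB__eq_int16) kernel above implements the eWiseAdd
 * path for the built-in EQ operator on int16 inputs (boolean output). A
 * minimal user-level call that would dispatch into it through the
 * SuiteSparse:GraphBLAS C API (error handling elided; `n` is an assumed
 * dimension):
 *
 *   #include "GraphBLAS.h"
 *
 *   GrB_Matrix A, B, C ;
 *   GrB_init (GrB_NONBLOCKING) ;
 *   GrB_Matrix_new (&A, GrB_INT16, n, n) ;
 *   GrB_Matrix_new (&B, GrB_INT16, n, n) ;
 *   GrB_Matrix_new (&C, GrB_BOOL, n, n) ;
 *   // ... populate A and B ...
 *   // C = (A == B) on the union of the patterns of A and B:
 *   GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_EQ_INT16, A, B, NULL) ;
 */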
data.c
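/* data.c -- data-loading and label-handling routines from the darknet
 * framework (an OpenCL fork, per the opencl.h include below): path lists,
 * image and label loading, ground-truth filling for detection,
 * segmentation, and classification, threaded batch loading, and small
 * dataset helpers (CIFAR-10, Go). Note the `#define class temp` and
 * `#define new new_temp` guards below, which let this C file compile under
 * a C++ compiler despite using those identifiers. */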
#include "data.h" #include "utils.h" #include "image.h" #include "opencl.h" #include <stdio.h> #include <stdlib.h> #include <string.h> pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; #define class temp #define new new_temp list *get_paths(char *filename) { if (filename) filename[strcspn(filename, "\n\r")] = 0; char *pos; if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0'; if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0'; char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = rand()%m; indexes[i] = index; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_random_paths(char **paths, int n, int m) { pthread_mutex_lock(&mutex); char **random_paths = (char**)calloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ int index = rand()%m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char **replace_paths = (char**)calloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop; if(center){ crop = center_crop_image(im, size, size); } else { crop = random_augment_image(im, angle, aspect, min, max, size, size); } int flip = rand()%2; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); /* show_image(im, "orig"); show_image(crop, "crop"); cvWaitKey(0); */ //grayscale_image_3c(crop); free_image(im); X.vals[i] = crop.data; X.cols = crop.h*crop.w*crop.c; } return X; } box_label *read_boxes(char *filename, int *n) { //if (filename) filename[strcspn(filename, "\n\r")] = 0; char *pos; if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0'; if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0'; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); float x, y, h, w; int id; int count = 0; int size = 64; box_label *boxes = (box_label*)calloc(size, sizeof(box_label)); while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ if(count == size) { size = size * 2; boxes = (box_label*)realloc(boxes, 
size*sizeof(box_label)); } boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = rand()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 90; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .005 || h < .005) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } void load_rle(image im, int *rle, int n) 
{ int count = 0; int curr = 0; int i,j; for(i = 0; i < n; ++i){ for(j = 0; j < rle[i]; ++j){ im.data[count++] = curr; } curr = 1 - curr; } for(; count < im.h*im.w*im.c; ++count){ im.data[count] = curr; } } void or_image(image src, image dest, int c) { int i; for(i = 0; i < src.w*src.h; ++i){ if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1; } } void exclusive_image(image src) { int k, j, i; int s = src.w*src.h; for(k = 0; k < src.c-1; ++k){ for(i = 0; i < s; ++i){ if (src.data[k*s + i]){ for(j = k+1; j < src.c; ++j){ src.data[j*s + i] = 0; } } } } } box bound_image(image im) { int x,y; int minx = im.w; int miny = im.h; int maxx = 0; int maxy = 0; for(y = 0; y < im.h; ++y){ for(x = 0; x < im.w; ++x){ if(im.data[y*im.w + x]){ minx = (x < minx) ? x : minx; miny = (y < miny) ? y : miny; maxx = (x > maxx) ? x : maxx; maxy = (y > maxy) ? y : maxy; } } } box b = {minx, miny, maxx-minx + 1, maxy-miny + 1}; //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h); return b; } void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; int i = 0; int j; image part = make_image(w, h, 1); while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect); if(flip) flip_image(sized); image mask = resize_image(sized, mw, mh); truth[i*(mw*mh+1)] = id; for(j = 0; j < mw*mh; ++j){ truth[i*(mw*mh + 1) + 1 + j] = mask.data[j]; } ++i; free_image(mask); free_image(sized); free(rle); } if(i < num_boxes) truth[i*(mw*mh+1)] = -1; fclose(file); free_image(part); } void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; int i = 0; image part = make_image(w, h, 1); while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect); if(flip) flip_image(sized); box b = bound_image(sized); if(b.w > 0){ image crop = crop_image(sized, b.x, b.y, b.w, b.h); image mask = resize_image(crop, mw, mh); truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w; truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h; truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w; truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h; int j; for(j = 0; j < mw*mh; ++j){ truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j]; } truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id; free_image(crop); free_image(mask); ++i; } free_image(sized); free(rle); } fclose(file); free_image(part); } void fill_truth_detection(char *path, int num_boxes, float *truth, int 
classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, "raw", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if(count > num_boxes) count = num_boxes; float x,y,w,h; int id; int i; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if ((w < .001 || h < .001)) { ++sub; continue; } truth[(i-sub)*5+0] = x; truth[(i-sub)*5+1] = y; truth[(i-sub)*5+2] = w; truth[(i-sub)*5+3] = h; truth[(i-sub)*5+4] = id; } free(boxes); } #define NUMCHARS 37 void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; //printf("%s %s %d\n", path, labels[i], i); } } if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path); } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_regression_labels_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i,j; for(i = 0; i < n; ++i){ char labelpath[4096]; find_replace(paths[i], "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".BMP", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPeG", ".txt", labelpath); find_replace(labelpath, ".Jpeg", ".txt", labelpath); 
find_replace(labelpath, ".PNG", ".txt", labelpath); find_replace(labelpath, ".TIF", ".txt", labelpath); find_replace(labelpath, ".bmp", ".txt", labelpath); find_replace(labelpath, ".jpeg", ".txt", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".tif", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); for(j = 0; j < k; ++j){ fscanf(file, "%f", &(y.vals[i][j])); } fclose(file); } return y; } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth(paths[i], labels, k, y.vals[i]); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; //int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "images", "labels", label); find_replace(label, ".jpg", ".txt", label); FILE *file = fopen(label, "r"); if (!file) continue; //++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } //printf("%d/%d\n", count, n); return y; } char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } image get_segmentation_image(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while(fscanf(file, "%d %s", &id, buff) == 2){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); free(rle); } //exclusive_image(mask); fclose(file); free_image(part); return mask; } image get_segmentation_image2(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes+1); int i; for(i = 0; i < w*h; ++i){ mask.data[w*h*classes + i] = 1; } FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while(fscanf(file, "%d %s", &id, buff) == 2){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); for(i = 0; i < w*h; ++i){ if(part.data[i]) mask.data[w*h*classes + i] = 0; } free(rle); } //exclusive_image(mask); fclose(file); free_image(part); return mask; } data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y.rows = n; d.y.cols = h*w*classes/div/div; d.y.vals = 
(float**)calloc(d.X.rows, sizeof(float*)); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes); //image mask = make_image(orig.w, orig.h, classes+1); image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect); if(flip) flip_image(sized_m); d.y.vals[i] = sized_m.data; free_image(orig); free_image(mask); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; //show_image(sized, "image"); fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div); free_image(orig); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, (coords+1)*boxes); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; //show_image(sized, "image"); fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14); free_image(orig); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 
0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = rand()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = rand()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*90; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = rand()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/w)/sx; float dy = ((float)ptop /h)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); return d; } data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float 
saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, 5*boxes); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); image sized = make_image(w, h, orig.c); fill_image(sized, .5); float dw = jitter * orig.w; float dh = jitter * orig.h; float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh)); //float scale = rand_uniform(.25, 2); float scale = 1; float nw, nh; if(new_ar < 1){ nh = scale * h; nw = nh * new_ar; } else { nw = scale * w; nh = nw / new_ar; } float dx = rand_uniform(0, w - nw); float dy = rand_uniform(0, h - nh); place_image(orig, nw, nh, dx, dy, sized); random_distort_image(sized, hue, saturation, exposure); int flip = rand()%2; if(flip) flip_image(sized); d.X.vals[i] = sized.data; fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h); free_image(orig); } free(random_paths); return d; } void *load_thread(void *ptr) { //printf("Loading data: %d\n", rand()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == REGRESSION_DATA){ *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == ISEG_DATA){ *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == INSTANCE_DATA){ *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == SEGMENTATION_DATA){ *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = resize_image(*(a.im), a.w, a.h); } else if (a.type == LETTERBOX_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t 
thread; struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } void *load_threads(void *ptr) { int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data *buffers = (data*)calloc(args.threads, sizeof(data)); pthread_t *threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); free(threads); return 0; } void load_data_blocking(load_args args) { struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args)); *ptr = args; load_thread(ptr); } pthread_t load_data(load_args args) { pthread_t thread; struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)calloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)calloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = rand()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_regression_labels_paths(paths, n, k); if(m) 
free(paths); return d; } data select_data(data *orig, int *inds) { data d = {0}; d.shallow = 1; d.w = orig[0].w; d.h = orig[0].h; d.X.rows = orig[0].X.rows; d.y.rows = orig[0].X.rows; d.X.cols = orig[0].X.cols; d.y.cols = orig[0].y.cols; d.X.vals = (float**)calloc(orig[0].X.rows, sizeof(float *)); d.y.vals = (float**)calloc(orig[0].y.rows, sizeof(float *)); int i; for(i = 0; i < d.X.rows; ++i){ d.X.vals[i] = orig[inds[i]].X.vals[i]; d.y.vals[i] = orig[inds[i]].y.vals[i]; } return d; } data *tile_data(data orig, int divs, int size) { data *ds = (data*)calloc(divs*divs, sizeof(data)); int i, j; #pragma omp parallel for for(i = 0; i < divs*divs; ++i){ data d; d.shallow = 0; d.w = orig.w/divs * size; d.h = orig.h/divs * size; d.X.rows = orig.X.rows; d.X.cols = d.w*d.h*3; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(j = 0; j < orig.X.rows; ++j){ int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2; int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2; image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]); d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data; } ds[i] = d; } return ds; } data resize_data(data orig, int w, int h) { data d = {0}; d.shallow = 0; d.w = w; d.h = h; int i; d.X.rows = orig.X.rows; d.X.cols = w*h*3; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(i = 0; i < orig.X.rows; ++i){ image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]); d.X.vals[i] = resize_image(im, w, h).data; } return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.w=size; d.h=size; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center); d.y = load_labels_paths(paths, n, labels, k, hierarchy); if(m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = size; d.h = size; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); d.w = d1.w; d.h = d1.h; return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data new = concat_data(d[i], out); free_data(out); out = new; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = 
make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = rand()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. / d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i+b*10000][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = rand()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } data copy_data(data d) { data c = {0}; c.w = d.w; c.h = d.h; c.shallow = 0; c.num_boxes = d.num_boxes; c.boxes = d.boxes; c.X = copy_matrix(d.X); c.y = copy_matrix(d.y); return c; } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols 
= d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)calloc(num, sizeof(float *)); r.y.vals = (float**)calloc(num, sizeof(float *)); int i; for(i = 0; i < num; ++i){ int index = rand()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data *split = (data*)calloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train; data test; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)calloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)calloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)calloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)calloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; } #undef class #undef new
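/* Editor's note -- a hedged usage sketch, not part of the original file.
 * Typical use of the threaded loader above, roughly as darknet's training
 * drivers do it; the struct fields follow the `load_args` usage in
 * load_thread()/load_threads() above, and all concrete values here are
 * illustrative assumptions:
 *
 *   load_args args = {0};
 *   args.w = 256; args.h = 256; args.size = 256;
 *   args.min = 256; args.max = 256;      // no scale augmentation
 *   args.paths = paths;                  // char** of image paths
 *   args.n = 128;                        // images loaded per call
 *   args.m = num_paths;                  // total number of paths
 *   args.labels = labels; args.classes = classes;
 *   args.threads = 8;
 *   args.type = CLASSIFICATION_DATA;
 *   data buffer;
 *   args.d = &buffer;                    // filled by load_threads()
 *
 *   pthread_t tid = load_data(args);     // asynchronous batch load
 *   pthread_join(tid, 0);                // wait for the batch
 *   // ... train on `buffer` ...
 *   free_data(buffer);
 */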
drsdd.c
/*! @copyright (c) 2017 King Abdullah University of Science and
 *                      Technology (KAUST). All rights reserved.
 *
 * STARS-H is a software package, provided by King Abdullah
 *             University of Science and Technology (KAUST)
 *
 * @file src/backends/mpi/blrm/drsdd.c
 * @version 0.3.0
 * @author Aleksandr Mikhalev
 * @date 2017-11-07
 * */

#include "common.h"
#include "starsh.h"
#include "starsh-mpi.h"

int cmp_size_t(const void *a, const void *b)
//! Compare two size_t integers
{
    size_t _a = *(size_t *)a;
    size_t _b = *(size_t *)b;
    if(_a > _b)
        return 1;
    if(_a == _b)
        return 0;
    return -1;
}

int starsh_blrm__drsdd_mpi(STARSH_blrm **matrix, STARSH_blrf *format,
        int maxrank, double tol, int onfly)
//! Approximate each tile by randomized SVD.
/*!
 * @param[out] matrix: Address of pointer to @ref STARSH_blrm object.
 * @param[in] format: Block low-rank format.
 * @param[in] maxrank: Maximum possible rank.
 * @param[in] tol: Relative error tolerance.
 * @param[in] onfly: Whether or not to store dense (near-field) blocks;
 *      if nonzero, they are not computed and stored here.
 * @return Error code @ref STARSH_ERRNO.
 * @ingroup blrm
 * */
{
    STARSH_blrf *F = format;
    STARSH_problem *P = F->problem;
    STARSH_kernel *kernel = P->kernel;
    STARSH_int nblocks_far = F->nblocks_far;
    STARSH_int nblocks_near = F->nblocks_near;
    STARSH_int nblocks_far_local = F->nblocks_far_local;
    STARSH_int nblocks_near_local = F->nblocks_near_local;
    // Shortcuts to information about clusters
    STARSH_cluster *RC = F->row_cluster;
    STARSH_cluster *CC = F->col_cluster;
    void *RD = RC->data, *CD = CC->data;
    // Following values default to given block low-rank format F, but they are
    // changed when there are false far-field blocks.
    STARSH_int new_nblocks_far = F->nblocks_far;
    STARSH_int new_nblocks_near = F->nblocks_near;
    STARSH_int new_nblocks_far_local = F->nblocks_far_local;
    STARSH_int new_nblocks_near_local = F->nblocks_near_local;
    STARSH_int *block_far = F->block_far;
    STARSH_int *block_near = F->block_near;
    STARSH_int *block_far_local = F->block_far_local;
    STARSH_int *block_near_local = F->block_near_local;
    // Places to store low-rank factors, dense blocks and ranks
    Array **far_U = NULL, **far_V = NULL, **near_D = NULL;
    int *far_rank = NULL;
    double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL;
    size_t offset_U = 0, offset_V = 0, offset_D = 0;
    STARSH_int lbi, lbj, bi, bj = 0;
    double drsdd_time = 0, kernel_time = 0;
    const int oversample = starsh_params.oversample;
    // Init buffers to store low-rank factors of far-field blocks if needed
    if(nblocks_far > 0)
    {
        STARSH_MALLOC(far_U, nblocks_far_local);
        STARSH_MALLOC(far_V, nblocks_far_local);
        STARSH_MALLOC(far_rank, nblocks_far_local);
        size_t size_U = 0, size_V = 0;
        // Simple cycle over all far-field blocks
        for(lbi = 0; lbi < nblocks_far_local; lbi++)
        {
            STARSH_int bi = block_far_local[lbi];
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_far[2*bi];
            STARSH_int j = block_far[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_U += RC->size[i];
            size_V += CC->size[j];
        }
        size_U *= maxrank;
        size_V *= maxrank;
        STARSH_MALLOC(alloc_U, size_U);
        STARSH_MALLOC(alloc_V, size_V);
        for(lbi = 0; lbi < nblocks_far_local; lbi++)
        {
            STARSH_int bi = block_far_local[lbi];
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_far[2*bi];
            STARSH_int j = block_far[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_t nrows = RC->size[i], ncols = CC->size[j];
            int shape_U[] = {nrows, maxrank};
            int shape_V[] = {ncols, maxrank};
            double *U = alloc_U+offset_U, *V = alloc_V+offset_V;
            offset_U += nrows*maxrank;
            offset_V += ncols*maxrank;
            array_from_buffer(far_U+lbi, 2, shape_U, 'd', 'F', U);
            array_from_buffer(far_V+lbi, 2, shape_V, 'd', 'F', V);
        }
        offset_U = 0;
        offset_V = 0;
    }
    // Work variables
    int info;
    // Simple cycle over all far-field admissible blocks
    #pragma omp parallel for schedule(dynamic, 1)
    for(lbi = 0; lbi < nblocks_far_local; lbi++)
    {
        STARSH_int bi = block_far_local[lbi];
        // Get indexes of corresponding block row and block column
        STARSH_int i = block_far[2*bi];
        STARSH_int j = block_far[2*bi+1];
        // Get corresponding sizes and minimum of them
        int nrows = RC->size[i];
        int ncols = CC->size[j];
        int mn = nrows < ncols ? nrows : ncols;
        int mn2 = maxrank+oversample;
        if(mn2 > mn)
            mn2 = mn;
        // Get size of temporary arrays
        int lwork = ncols, lwork_sdd = (4*mn2+7)*mn2;
        if(lwork_sdd > lwork)
            lwork = lwork_sdd;
        lwork += (size_t)mn2*(2*ncols+nrows+mn2+1);
        int liwork = 8*mn2;
        double *D, *work;
        int *iwork;
        int info;
        // Allocate temporary arrays
        STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);
        STARSH_PMALLOC(iwork, liwork, info);
        STARSH_PMALLOC(work, lwork, info);
        // Compute elements of a block
#ifdef OPENMP
        double time0 = omp_get_wtime();
#endif
        kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],
                RD, CD, D, nrows);
#ifdef OPENMP
        double time1 = omp_get_wtime();
#endif
        starsh_dense_dlrrsdd(nrows, ncols, D, nrows, far_U[lbi]->data, nrows,
                far_V[lbi]->data, ncols, far_rank+lbi, maxrank, oversample,
                tol, work, lwork, iwork);
#ifdef OPENMP
        double time2 = omp_get_wtime();
        #pragma omp critical
        {
            drsdd_time += time2-time1;
            kernel_time += time1-time0;
        }
#endif
        // Free temporary arrays
        free(D);
        free(work);
        free(iwork);
    }
    // Get number of false far-field blocks
    STARSH_int nblocks_false_far_local = 0;
    STARSH_int *false_far_local = NULL;
    for(lbi = 0; lbi < nblocks_far_local; lbi++)
        if(far_rank[lbi] == -1)
            nblocks_false_far_local++;
    if(nblocks_false_far_local > 0)
    {
        // IMPORTANT: `false_far` and `false_far_local` must be in
        // ascending order for later code to work normally
        STARSH_MALLOC(false_far_local, nblocks_false_far_local);
        lbj = 0;
        for(lbi = 0; lbi < nblocks_far_local; lbi++)
            if(far_rank[lbi] == -1)
                false_far_local[lbj++] = block_far_local[lbi];
    }
    // Sync list of all false far-field blocks
    STARSH_int nblocks_false_far = 0;
    int int_nblocks_false_far_local = nblocks_false_far_local;
    int *mpi_recvcount, *mpi_offset;
    int mpi_size, mpi_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    STARSH_MALLOC(mpi_recvcount, mpi_size);
    STARSH_MALLOC(mpi_offset, mpi_size);
    MPI_Allgather(&int_nblocks_false_far_local, 1, MPI_INT, mpi_recvcount, 1,
            MPI_INT, MPI_COMM_WORLD);
    for(bi = 0; bi < mpi_size; bi++)
        nblocks_false_far += mpi_recvcount[bi];
    mpi_offset[0] = 0;
    for(bi = 1; bi < mpi_size; bi++)
        mpi_offset[bi] = mpi_offset[bi-1]+mpi_recvcount[bi-1];
    STARSH_int *false_far = NULL;
    if(nblocks_false_far > 0)
        STARSH_MALLOC(false_far, nblocks_false_far);
    MPI_Allgatherv(false_far_local, nblocks_false_far_local, my_MPI_SIZE_T,
            false_far, mpi_recvcount, mpi_offset, my_MPI_SIZE_T,
            MPI_COMM_WORLD);
    free(mpi_recvcount);
    free(mpi_offset);
    // Make false_far be in ascending order
    qsort(false_far, nblocks_false_far, sizeof(*false_far), cmp_size_t);
    if(nblocks_false_far > 0)
    {
        // Update list of near-field blocks
        new_nblocks_near = nblocks_near+nblocks_false_far;
        new_nblocks_near_local = nblocks_near_local+nblocks_false_far_local;
        STARSH_MALLOC(block_near, 2*new_nblocks_near);
        if(new_nblocks_near_local > 0)
            STARSH_MALLOC(block_near_local, new_nblocks_near_local);
        // At first get all near-field blocks, assumed to be dense
        #pragma omp parallel for schedule(static)
        for(bi = 0; bi < 2*nblocks_near; bi++)
            block_near[bi] = F->block_near[bi];
        #pragma omp parallel for schedule(static)
        for(lbi = 0; lbi < nblocks_near_local; lbi++)
            block_near_local[lbi] = F->block_near_local[lbi];
        // Add false far-field blocks
        #pragma omp parallel for schedule(static)
        for(bi = 0; bi < nblocks_false_far; bi++)
        {
            STARSH_int bj = false_far[bi];
            block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];
            block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];
        }
        bi = 0;
        for(lbi = 0; lbi < nblocks_false_far_local; lbi++)
        {
            lbj = false_far_local[lbi];
            while(bi < nblocks_false_far && false_far[bi] < lbj)
                bi++;
            block_near_local[nblocks_near_local+lbi] = nblocks_near+bi;
        }
        // Update list of far-field blocks
        new_nblocks_far = nblocks_far-nblocks_false_far;
        new_nblocks_far_local = nblocks_far_local-nblocks_false_far_local;
        if(new_nblocks_far > 0)
        {
            STARSH_MALLOC(block_far, 2*new_nblocks_far);
            if(new_nblocks_far_local > 0)
                STARSH_MALLOC(block_far_local, new_nblocks_far_local);
            bj = 0;
            lbi = 0;
            lbj = 0;
            for(bi = 0; bi < nblocks_far; bi++)
            {
                // `false_far` must be in ascending order for this to work
                if(bj < nblocks_false_far && false_far[bj] == bi)
                {
                    if(nblocks_false_far_local > lbj
                            && false_far_local[lbj] == bi)
                    {
                        lbi++;
                        lbj++;
                    }
                    bj++;
                }
                else
                {
                    block_far[2*(bi-bj)] = F->block_far[2*bi];
                    block_far[2*(bi-bj)+1] = F->block_far[2*bi+1];
                    if(nblocks_far_local > lbi
                            && F->block_far_local[lbi] == bi)
                    {
                        block_far_local[lbi-lbj] = bi-bj;
                        lbi++;
                    }
                }
            }
        }
        // Update format by creating new format
        STARSH_blrf *F2;
        info = starsh_blrf_new_from_coo_mpi(&F2, P, F->symm, RC, CC,
                new_nblocks_far, block_far, new_nblocks_far_local,
                block_far_local, new_nblocks_near, block_near,
                new_nblocks_near_local, block_near_local, F->type);
        // Swap internal data of formats and free unnecessary data
        STARSH_blrf tmp_blrf = *F;
        *F = *F2;
        *F2 = tmp_blrf;
        if(mpi_rank == 0)
            STARSH_WARNING("`F` was modified due to false far-field blocks");
        starsh_blrf_free(F2);
    }
    // Compute near-field blocks if needed
    if(onfly == 0 && new_nblocks_near_local > 0)
    {
        STARSH_MALLOC(near_D, new_nblocks_near_local);
        size_t size_D = 0;
        // Simple cycle over all near-field blocks
        for(lbi = 0; lbi < new_nblocks_near_local; lbi++)
        {
            STARSH_int bi = block_near_local[lbi];
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_near[2*bi];
            STARSH_int j = block_near[2*bi+1];
            // Get corresponding sizes and minimum of them
            size_t nrows = RC->size[i];
            size_t ncols = CC->size[j];
            // Update size_D
            size_D += nrows*ncols;
        }
        STARSH_MALLOC(alloc_D, size_D);
        // For each near-field block compute its elements
        #pragma omp parallel for schedule(dynamic, 1)
        for(lbi = 0; lbi < new_nblocks_near_local; lbi++)
        {
            STARSH_int bi = block_near_local[lbi];
            // Get indexes of corresponding block row and block column
            STARSH_int i = block_near[2*bi];
            STARSH_int j = block_near[2*bi+1];
            // Get corresponding sizes and minimum of them
            int nrows = RC->size[i];
            int ncols = CC->size[j];
            int shape[2] = {nrows, ncols};
            double *D;
            #pragma omp critical
            {
                D = alloc_D+offset_D;
                offset_D += nrows*ncols;
                //array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);
                //offset_D += near_D[lbi]->size;
            }
            array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);
#ifdef OPENMP
            double time0 = omp_get_wtime();
#endif
            kernel(nrows, ncols, RC->pivot+RC->start[i],
                    CC->pivot+CC->start[j], RD, CD, D, nrows);
#ifdef OPENMP
            double time1 = omp_get_wtime();
            #pragma omp critical
            kernel_time += time1-time0;
#endif
        }
    }
    // Change sizes of far_rank, far_U and far_V if there were false
    // far-field blocks
    lbj = 0;
    for(lbi = 0; lbi < nblocks_far_local; lbi++)
    {
        if(far_rank[lbi] == -1)
            lbj++;
        else
        {
            int shape_U[2] = {far_U[lbi]->shape[0], far_rank[lbi]};
            int shape_V[2] = {far_V[lbi]->shape[0], far_rank[lbi]};
            array_from_buffer(far_U+lbi-lbj, 2, shape_U, 'd', 'F',
                    far_U[lbi]->data);
            array_from_buffer(far_V+lbi-lbj, 2, shape_V, 'd', 'F',
                    far_V[lbi]->data);
            far_rank[lbi-lbj] = far_rank[lbi];
        }
    }
    if(nblocks_false_far_local > 0 && new_nblocks_far_local > 0)
    {
        STARSH_REALLOC(far_rank, new_nblocks_far_local);
        STARSH_REALLOC(far_U, new_nblocks_far_local);
        STARSH_REALLOC(far_V, new_nblocks_far_local);
    }
    // If all far-field blocks are false, then dealloc buffers
    if(new_nblocks_far_local == 0 && nblocks_far_local > 0)
    {
        block_far = NULL;
        free(far_rank);
        far_rank = NULL;
        free(far_U);
        far_U = NULL;
        free(far_V);
        far_V = NULL;
        free(alloc_U);
        alloc_U = NULL;
        free(alloc_V);
        alloc_V = NULL;
    }
    // Dealloc list of false far-field blocks if it is not empty
    if(nblocks_false_far > 0)
        free(false_far);
    if(nblocks_false_far_local > 0)
        free(false_far_local);
    // Finish with creating instance of Block Low-Rank Matrix with given
    // buffers
#ifdef OPENMP
    double mpi_drsdd_time = 0, mpi_kernel_time = 0;
    MPI_Reduce(&drsdd_time, &mpi_drsdd_time, 1, MPI_DOUBLE, MPI_SUM, 0,
            MPI_COMM_WORLD);
    MPI_Reduce(&kernel_time, &mpi_kernel_time, 1, MPI_DOUBLE, MPI_SUM, 0,
            MPI_COMM_WORLD);
    if(mpi_rank == 0)
    {
        //STARSH_WARNING("DRSDD kernel total time: %e secs", mpi_drsdd_time);
        //STARSH_WARNING("MATRIX kernel total time: %e secs", mpi_kernel_time);
    }
#endif
    return starsh_blrm_new_mpi(matrix, F, far_rank, far_U, far_V, onfly,
            near_D, alloc_U, alloc_V, alloc_D, '1');
}
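/* Editor's note -- a hedged usage sketch, not part of the original file.
 * The routine above is normally invoked after a block low-rank format `F`
 * has been partitioned and distributed; everything except the call itself
 * is elided, and the parameter values are illustrative assumptions:
 *
 *   STARSH_blrm *M = NULL;
 *   int maxrank = 30;      // cap on the rank of each far-field block
 *   double tol = 1e-9;     // relative error tolerance per block
 *   int onfly = 0;         // 0: precompute and store near-field blocks
 *   int info = starsh_blrm__drsdd_mpi(&M, F, maxrank, tol, onfly);
 *   if(info != 0)
 *   {
 *       // handle the STARSH_ERRNO code
 *   }
 */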
remarks_parallel_in_multiple_target_state_machines.c
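// Checks the OpenMPOpt remarks emitted when parallel regions (in bar1/bar2
// below) are reachable from more than one target region, so their GPU
// state-machine use cannot safely be specialized; see the OMP100 remark and
// the RUN lines below.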
// RUN: %clang_cc1 -verify=host -Rpass=openmp -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // RUN: %clang_cc1 -fexperimental-new-pass-manager -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // host-no-diagnostics void bar1(void) { // all-remark {{[OMP100] Potentially unknown OpenMP target region caller}} #pragma omp parallel // #0 // all-remark@#0 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // safe-remark@#0 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}} // force-remark@#0 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}} // force-remark@#0 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__2_wrapper, kernel ID: <NONE>}} { } } void bar2(void) { // all-remark {{[OMP100] Potentially unknown OpenMP target region caller}} #pragma omp parallel // #1 // all-remark@#1 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // safe-remark@#1 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}} // force-remark@#1 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}} // force-remark@#1 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__6_wrapper, kernel ID: <NONE>}} { } } void foo1(void) { #pragma omp target teams // #2 // all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} // all-remark@#2 {{Target region containing the parallel region that is specialized. 
(parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #3 // all-remark@#3 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#3 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} { } bar1(); #pragma omp parallel // #4 // all-remark@#4 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#4 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}} { } } } void foo2(void) { #pragma omp target teams // #5 // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}} // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #6 // all-remark@#6 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#6 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); #pragma omp parallel // #7 // all-remark@#7 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#7 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); } } void foo3(void) { #pragma omp target teams // #8 // all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}} // all-remark@#8 {{Target region containing the parallel region that is specialized. 
(parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #9 // all-remark@#9 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#9 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); #pragma omp parallel // #10 // all-remark@#10 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#10 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); } } void spmd(void) { // Verify we do not emit the remarks above for "SPMD" regions. #pragma omp target teams #pragma omp parallel { } #pragma omp target teams distribute parallel for for (int i = 0; i < 100; ++i) { } } // all-remark@* 3 {{OpenMP runtime call __kmpc_global_thread_num moved to}} // all-remark@* 3 {{OpenMP runtime call __kmpc_global_thread_num deduplicated}}
core_zlange.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include "core_blas.h" #include "plasma_types.h" #include "core_lapack.h" #include <math.h> /***************************************************************************//** * * @ingroup core_lange * * Calculates max, one, infinity or Frobenius norm of a given matrix. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: Max norm * - PlasmaOneNorm: One norm * - PlasmaInfNorm: Infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] m * The number of rows of the matrix A. m >= 0. When m = 0, * the returned value is set to zero. * * @param[in] n * The number of columns of the matrix A. n >= 0. When n = 0, * the returned value is set to zero. * * @param[in] A * The m-by-n matrix A. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[in] work * The auxiliary work array. * * @param[out] value * The specified norm of the given matrix A * ******************************************************************************/ void core_zlange(plasma_enum_t norm, int m, int n, const plasma_complex64_t *A, int lda, double *work, double *value) { *value = LAPACKE_zlange_work(LAPACK_COL_MAJOR, lapack_const(norm), m, n, A, lda, work); } /******************************************************************************/ void core_omp_zlange(int norm, int m, int n, const plasma_complex64_t *A, int lda, double *work, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:1]) { if (sequence->status == PlasmaSuccess) core_zlange(norm, m, n, A, lda, work, value); } } /******************************************************************************/ void core_omp_zlange_aux(int norm, int m, int n, const plasma_complex64_t *A, int lda, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { switch (norm) { case PlasmaOneNorm: #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:n]) { if (sequence->status == PlasmaSuccess) { for (int j = 0; j < n; j++) { value[j] = cabs(A[lda*j]); for (int i = 1; i < m; i++) { value[j] += cabs(A[lda*j+i]); } } } } break; case PlasmaInfNorm: #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:m]) { if (sequence->status == PlasmaSuccess) { for (int i = 0; i < m; i++) value[i] = 0.0; for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { value[i] += cabs(A[lda*j+i]); } } } } break; } }
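A minimal sketch of calling the sequential kernel above, assuming the same includes as this file. Per LAPACK's zlange, the work array is only referenced for the infinity norm, where it must hold m doubles; the max, one, and Frobenius norms ignore it.

#include <stdlib.h>

void example_inf_norm(int m, int n, const plasma_complex64_t *A, int lda)
{
    double value;
    double *work = (double*) malloc((size_t)m * sizeof(double));
    core_zlange(PlasmaInfNorm, m, n, A, lda, work, &value);
    /* value now holds max_i sum_j |A(i,j)| */
    free(work);
}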
SupportReportC.c
/// PMlib report control routine for C programs
/// - [1] stop the Root section
/// - [2] merge thread serial/parallel sections
/// - [3] select the type of the report and start producing the report
///
/// @param[in] char* filename output file name
///
/// @note if filename is NULL (""), stdout is chosen.
///
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>
// #include "pmlib_api_C.h"

void C_pm_report (char *filename);
extern void C_pm_serial_parallel (int id, int *mid, int *inside);
extern void C_pm_stop_Root (void);
extern void C_pm_sections (int *nSections);
extern void C_pm_mergethreads (int id);
extern void C_pm_select_report (char *filename);

void C_pm_report (char *filename)
{
	int id, mid, inside;
	int nSections;

	// stop the Root section before reporting
	id = 0;
	C_pm_serial_parallel (id, &mid, &inside);
	if (inside == 0) {
		C_pm_stop_Root ();
	} else if (inside == 1) {
		#pragma omp parallel
		C_pm_stop_Root ();
	} else {
		;
	}

	// count the number of SHARED sections
	C_pm_sections (&nSections);

	// merge thread data into the master thread
	for (id = 0; id < nSections; id++) {
		C_pm_serial_parallel (id, &mid, &inside);
		if (inside == 0) {
			// The section is defined outside of a parallel context
			C_pm_mergethreads (id);
		} else if (inside == 1) {
			// The section is defined inside a parallel context.
			// If an OpenMP parallel region is started by a C routine,
			// the merge operation must be triggered by a C routine,
			// which is outside of the PMlib C++ class parallel context.
			// The following OpenMP parallel block provides such merging support.
			#pragma omp parallel
			C_pm_mergethreads (id);
		} else {
			;
		}
	} // end of for loop

	// now start reporting the PMlib stats
	C_pm_select_report (filename);
	return;
}
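A hedged usage sketch: C_pm_report() is called once, after all instrumented sections have finished, and per the note above an empty file name sends the report to stdout. The surrounding PMlib initialization is elided:

int main(void)
{
    /* ... PMlib initialization and instrumented sections run here ... */
    C_pm_report("");                   /* report to stdout */
    /* C_pm_report("pmlib_stats.txt");    or to a named file */
    return 0;
}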
Example_copyprivate.2.c
/* * @@name: copyprivate.2c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success */ #include <stdio.h> #include <stdlib.h> float read_next( ) { float * tmp; float return_val; #pragma omp single copyprivate(tmp) { tmp = (float *) malloc(sizeof(float)); } /* copies the pointer only */ #pragma omp master { scanf("%f", tmp); } #pragma omp barrier return_val = *tmp; #pragma omp barrier #pragma omp single nowait { free(tmp); } return return_val; }
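Because read_next() contains single, master, and barrier constructs, it must be reached by every thread of the team, the same number of times. A sketch of the intended call site (consume_stream is hypothetical; the per-thread work is elided):

void consume_stream(int nitems)
{
    #pragma omp parallel
    {
        for (int i = 0; i < nitems; i++) {
            float v = read_next(); /* all threads receive the value scanned by master */
            /* ... per-thread work on v ... */
        }
    }
}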
common.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <utility> #include <vector> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. 
while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', 
'0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; 
template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. 
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (auto t = input.begin(); t !=input.end(); ++t) { ret.push_back(t->get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; auto& ref_key = *keys; auto& ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) { std::vector<T*> ptr(data->size()); auto& ref_data = *data; for (size_t i = 0; i < data->size(); ++i) { ptr[i] = ref_data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)) { return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) { auto& ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != 
'\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } inline bool CheckASCII(const std::string& s) { for (auto c : s) { if (static_cast<unsigned char>(c) > 127) { return false; } } return true; } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
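A hedged usage sketch of a few helpers above (StringToArray, ParallelSort, Join); the include path is inferred from this header's own includes and may differ in the actual tree:

#include <functional>
#include <string>
#include <vector>
#include <LightGBM/utils/common.h>

int main() {
  // Parse a space-delimited list, sort it (in parallel when OpenMP is on),
  // and join it back into a string.
  std::vector<double> v =
      LightGBM::Common::StringToArray<double>("3.5 1.25 2.0", ' ');
  LightGBM::Common::ParallelSort(v.begin(), v.end(), std::less<double>());
  std::string joined = LightGBM::Common::Join(v, ",");  // "1.25,2,3.5"
  return 0;
}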
core_clanhe.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlanhe.c, normal z -> c, Fri Sep 28 17:38:21 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" #include <math.h> /******************************************************************************/ __attribute__((weak)) void plasma_core_clanhe(plasma_enum_t norm, plasma_enum_t uplo, int n, const plasma_complex32_t *A, int lda, float *work, float *value) { *value = LAPACKE_clanhe_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), n, A, lda, work); } /******************************************************************************/ void plasma_core_omp_clanhe(plasma_enum_t norm, plasma_enum_t uplo, int n, const plasma_complex32_t *A, int lda, float *work, float *value, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:1]) { if (sequence->status == PlasmaSuccess) plasma_core_clanhe(norm, uplo, n, A, lda, work, value); } } /******************************************************************************/ void plasma_core_omp_clanhe_aux(plasma_enum_t norm, plasma_enum_t uplo, int n, const plasma_complex32_t *A, int lda, float *value, plasma_sequence_t *sequence, plasma_request_t *request) { switch (norm) { case PlasmaOneNorm: case PlasmaInfNorm: #pragma omp task depend(in:A[0:lda*n]) \ depend(out:value[0:n]) { if (sequence->status == PlasmaSuccess) { if (uplo == PlasmaUpper) { for (int i = 0; i < n; i++) value[i] = 0.0; for (int j = 0; j < n; j++) { for (int i = 0; i < j; i++) { value[i] += cabsf(A[lda*j+i]); value[j] += cabsf(A[lda*j+i]); } value[j] += fabsf(creal(A[lda*j+j])); } } else { // PlasmaLower for (int i = 0; i < n; i++) value[i] = 0.0; for (int j = 0; j < n; j++) { value[j] += fabsf(creal(A[lda*j+j])); for (int i = j+1; i < n; i++) { value[i] += cabsf(A[lda*j+i]); value[j] += cabsf(A[lda*j+i]); } } } } } break; } }
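The aux kernel above visits only the stored triangle and credits each off-diagonal |a_ij| to both its row and its column sum, since |a_ij| = |a_ji| for a Hermitian matrix and the diagonal is real. A scalar sketch of the same accumulation, simplified to real float data:

#include <math.h>

/* Row/column absolute sums of a Hermitian matrix stored in its upper
 * triangle (column-major, leading dimension lda). */
void hermitian_abs_sums_upper(int n, const float *A, int lda, float *value)
{
    for (int i = 0; i < n; i++)
        value[i] = 0.0f;
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < j; i++) {
            float a = fabsf(A[lda*j + i]);
            value[i] += a;   /* contributes to row sum i    */
            value[j] += a;   /* mirrored into column sum j  */
        }
        value[j] += fabsf(A[lda*j + j]); /* diagonal is real */
    }
}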
iwork_fmt_plug.c
/* JtR format to crack iWork '09, and '13 / '14 files. * * This software is Copyright (c) 2015, Dhiru Kholia <kholia at kth.se> and * Maxime Hulliger <hulliger at kth.se>, and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This code may be freely used and modified for any purpose. * * Big thanks to Sean Patrick O'Brien for making this format possible. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_iwork; #elif FMT_REGISTERS_H john_register_one(&fmt_iwork); #else #include <string.h> #include <assert.h> #include <errno.h> #include <openssl/des.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "pbkdf2_hmac_sha1.h" #include "jumbo.h" #include "memdbg.h" #include "iwork_common.h" #define FORMAT_LABEL "iwork" #define FORMAT_NAME "Apple iWork '09 / '13 / '14" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(*fctx) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static struct format_context *fctx; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static void set_salt(void *salt) { fctx = (struct format_context *)salt; } static void iwork_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static int iwork_decrypt(struct format_context *fctx, unsigned char *key, unsigned char *iv, unsigned char *data) { unsigned char out[BLOBLEN]; unsigned char ivec[IVLEN]; uint8_t hash[32]; SHA256_CTX ctx; AES_KEY aes_decrypt_key; AES_set_decrypt_key(key, 128, &aes_decrypt_key); memcpy(ivec, iv, 16); AES_cbc_encrypt(fctx->blob, out, BLOBLEN, &aes_decrypt_key, ivec, AES_DECRYPT); // The last 32 bytes should be equal to the SHA256 of the first 32 bytes (IWPasswordVerifier.m) SHA256_Init(&ctx); SHA256_Update(&ctx, out, 32); SHA256_Final(hash, &ctx); return memcmp(hash, &out[32], 32) == 0; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; memset(cracked, 0, sizeof(cracked[0])*cracked_count); #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char master[MAX_KEYS_PER_CRYPT][16]; int i; #ifdef SIMD_COEF_32 int 
lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; pout[i] = master[i]; } pbkdf2_sha1_sse((const unsigned char**)pin, lens, fctx->salt, fctx->salt_length, fctx->iterations, pout, 16, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) pbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), fctx->salt, fctx->salt_length, fctx->iterations, master[i], 16, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { cracked[index+i] = iwork_decrypt(fctx, master[i], fctx->iv, fctx->blob); } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_iwork = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, iwork_tests }, { init, done, fmt_default_reset, fmt_default_prepare, iwork_common_valid, fmt_default_split, fmt_default_binary, iwork_common_get_salt, { iwork_common_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, iwork_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
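The cryptographic core of the format reduces to the check in iwork_decrypt(): PBKDF2 derives a 16-byte AES key, the blob is decrypted with AES-128-CBC, and the SHA-256 of the first 32 plaintext bytes must equal the last 32 bytes. A self-contained sketch of that verifier using OpenSSL, assuming BLOBLEN is 64 (which the 32+32 layout implies):

#include <string.h>
#include <openssl/aes.h>
#include <openssl/sha.h>

static int verify_candidate(const unsigned char key[16],
                            const unsigned char iv[16],
                            const unsigned char blob[64])
{
    unsigned char out[64], ivec[16], hash[SHA256_DIGEST_LENGTH];
    AES_KEY dec;

    AES_set_decrypt_key(key, 128, &dec);
    memcpy(ivec, iv, 16);                  /* AES_cbc_encrypt clobbers the IV */
    AES_cbc_encrypt(blob, out, 64, &dec, ivec, AES_DECRYPT);

    SHA256(out, 32, hash);                 /* hash of the first 32 bytes ...  */
    return memcmp(hash, out + 32, 32) == 0; /* ... must match the last 32    */
}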
SeparableFilter.h
/* * (C) 2007 Niels Martin Hansen * (C) 2013-2017 see Authors.txt * * This file is part of MPC-BE. * * MPC-BE is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * MPC-BE is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #pragma once #include <math.h> #define LIBDIVIDE_USE_SSE2 1 #include "libdivide.h" /* // Filter an image in horizontal direction with a one-dimensional filter // PixelWidth is the distance in bytes between pixels template<ptrdiff_t PixelDist> void SeparableFilterX(unsigned char* src, unsigned char* dst, int width, int height, ptrdiff_t stride, short* kernel, int kernel_size, int divisor) { int* tmp = DNew int[width]; for (int y = 0; y < height; y++) { ZeroMemory(tmp, width * sizeof(int)); const unsigned char* in = src + y * stride; unsigned char* out = dst + y * stride; for (int k = 0; k < kernel_size; k++) { int xOffset = k - kernel_size / 2; int xStart = 0; int xEnd = width; if (xOffset < 0) { xEnd += xOffset; } else if (xOffset > 0) { xStart += xOffset; } for (int x = xStart; x < xEnd; x++) { tmp[x - xOffset] += (int)(in[x * PixelDist] * kernel[k]); } } for (int x = 0; x < width; x++) { int accum = tmp[x] / divisor; if (accum > 255) { accum = 255; } else if (accum < 0) { accum = 0; } out[x * PixelDist] = (unsigned char)accum; } } delete [] tmp; } // Filter an image in vertical direction with a one-dimensional filter // PixelWidth is the distance in bytes between pixels template<ptrdiff_t PixelDist> void SeparableFilterY(unsigned char* src, unsigned char* dst, int width, int height, ptrdiff_t stride, short* kernel, int kernel_size, int divisor) { int* tmp = DNew int[width]; for (int y = 0; y < height; y++) { ZeroMemory(tmp, width * sizeof(int)); const unsigned char* in = src + y * stride; unsigned char* out = dst + y * stride; int kOffset = kernel_size / 2; int kStart = 0; int kEnd = kernel_size; if (y < kOffset) { // 0 > y - kOffset kStart += kOffset - y; } else if (height <= y + kOffset) { kEnd -= kOffset + y + 1 - height; } for (int k = kStart; k < kEnd; k++) { for (int x = 0; x < width; x++) { tmp[x] += (int)(in[(k - kOffset) * stride + x * PixelDist] * kernel[k]); } } for (int x = 0; x < width; x++) { int accum = tmp[x] / divisor; if (accum > 255) { accum = 255; } else if (accum < 0) { accum = 0; } out[x * PixelDist] = (unsigned char)accum; } } delete [] tmp; } */ // Filter an image in horizontal direction with a one-dimensional filter void SeparableFilterX_SSE2(unsigned char* src, unsigned char* dst, int width, int height, ptrdiff_t stride, short* kernel, int kernel_size, int divisor) { int width16 = width & ~15; int* tmp = (int*)_aligned_malloc(stride * sizeof(int), 16); libdivide::divider<int> divisorLibdivide(divisor); for (int y = 0; y < height; y++) { ZeroMemory(tmp, stride * sizeof(int)); const unsigned char* in = src + y * stride; unsigned char* out = dst + y * stride; for (int k = 0; k < kernel_size; k++) { int xOffset = k - kernel_size / 2; int xStart = 0; int xEnd = width; if (xOffset < 0) { xEnd += xOffset; } else if (xOffset > 0) { xStart += xOffset; } int 
xStart16 = (xStart + 15) & ~15; int xEnd16 = xEnd & ~15; if (xStart16 >= xEnd16) { // Don't use SSE2 at all xStart16 = xEnd16 = xEnd; } for (int x = xStart; x < xStart16; x++) { tmp[x - xOffset] += (int)(in[x] * kernel[k]); } __m128i coeff = _mm_set1_epi16(kernel[k]); for (int x = xStart16; x < xEnd16; x += 16) { // Load 16 values __m128i data16 = _mm_load_si128((__m128i*)&in[x]); // Multiply the first 8 values by the coefficient to get 8 32-bit integers __m128i data8 = _mm_unpacklo_epi8(data16, _mm_setzero_si128()); __m128i resLo = _mm_mullo_epi16(data8, coeff); __m128i resHi = _mm_mulhi_epi16(data8, coeff); __m128i res32bitLo = _mm_unpacklo_epi16(resLo, resHi); __m128i res32bitHi = _mm_unpackhi_epi16(resLo, resHi); // Load the 4 32-bit integers values, add the values we computed and store them back __m128i res = _mm_loadu_si128((__m128i*)&tmp[x - xOffset]); res = _mm_add_epi32(res, res32bitLo); _mm_storeu_si128((__m128i*)&tmp[x - xOffset], res); // Repeat the same operation for the next 4 values res = _mm_loadu_si128((__m128i*)&tmp[x - xOffset + 4]); res = _mm_add_epi32(res, res32bitHi); _mm_storeu_si128((__m128i*)&tmp[x - xOffset + 4], res); // Multiply the next 8 values by the coefficient to get 8 32-bit integers data8 = _mm_unpackhi_epi8(data16, _mm_setzero_si128()); resLo = _mm_mullo_epi16(data8, coeff); resHi = _mm_mulhi_epi16(data8, coeff); res32bitLo = _mm_unpacklo_epi16(resLo, resHi); res32bitHi = _mm_unpackhi_epi16(resLo, resHi); // Load the 4 32-bit integers values, add the values we computed and store them back res = _mm_loadu_si128((__m128i*)&tmp[x - xOffset + 8]); res = _mm_add_epi32(res, res32bitLo); _mm_storeu_si128((__m128i*)&tmp[x - xOffset + 8], res); // Repeat the same operation for the next 4 values res = _mm_loadu_si128((__m128i*)&tmp[x - xOffset + 12]); res = _mm_add_epi32(res, res32bitHi); _mm_storeu_si128((__m128i*)&tmp[x - xOffset + 12], res); } for (int x = xEnd16; x < xEnd; x++) { tmp[x - xOffset] += (int)(in[x] * kernel[k]); } } for (int x = 0; x < width16; x += 16) { // Load 4 32-bit integer values and divide them __m128i accum1 = _mm_load_si128((__m128i*)&tmp[x]); accum1 = accum1 / divisorLibdivide; // Repeat the same operation on the next 4 32-bit integer values __m128i accum2 = _mm_load_si128((__m128i*)&tmp[x + 4]); accum2 = accum2 / divisorLibdivide; // Pack the 8 32-bit integers into 8 16-bit integers accum1 = _mm_packs_epi32(accum1, accum2); // Load 4 32-bit integer values and divide them __m128i accum3 = _mm_load_si128((__m128i*)&tmp[x + 8]); accum3 = accum3 / divisorLibdivide; // Repeat the same operation on the next 4 32-bit integer values __m128i accum4 = _mm_load_si128((__m128i*)&tmp[x + 12]); accum4 = accum4 / divisorLibdivide; // Pack the 8 32-bit integers into 8 16-bit integers accum3 = _mm_packs_epi32(accum3, accum4); // Pack the 16 16-bit integers into 16 8-bit unsigned integers accum1 = _mm_packus_epi16(accum1, accum3); // Store the 16 8-bit unsigned integers _mm_store_si128((__m128i*)&out[x], accum1); } for (int x = width16; x < width; x++) { int accum = tmp[x] / divisor; if (accum > 255) { accum = 255; } else if (accum < 0) { accum = 0; } out[x] = (unsigned char)accum; } } _aligned_free(tmp); } // Filter an image in vertical direction with a one-dimensional filter void SeparableFilterY_SSE2(unsigned char* src, unsigned char* dst, int width, int height, ptrdiff_t stride, short* kernel, int kernel_size, int divisor) { int width16 = width & ~15; int* tmp = (int*)_aligned_malloc(stride * sizeof(int), 16); libdivide::divider<int> 
divisorLibdivide(divisor); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < height; y++) { ZeroMemory(tmp, stride * sizeof(int)); const unsigned char* in = src + y * stride; unsigned char* out = dst + y * stride; int kOffset = kernel_size / 2; int kStart = 0; int kEnd = kernel_size; if (y < kOffset) { // 0 > y - kOffset kStart += kOffset - y; } else if (height <= y + kOffset) { kEnd -= kOffset + y + 1 - height; } for (int k = kStart; k < kEnd; k++) { __m128i coeff = _mm_set1_epi16(kernel[k]); for (int x = 0; x < width16; x += 16) { // Load 16 values __m128i data16 = _mm_load_si128((__m128i*)&in[(k - kOffset) * stride + x]); // Multiply the first 8 values by the coefficient to get 8 32-bit integers __m128i data8 = _mm_unpacklo_epi8(data16, _mm_setzero_si128()); __m128i resLo = _mm_mullo_epi16(data8, coeff); __m128i resHi = _mm_mulhi_epi16(data8, coeff); __m128i res32bitLo = _mm_unpacklo_epi16(resLo, resHi); __m128i res32bitHi = _mm_unpackhi_epi16(resLo, resHi); // Load the 4 32-bit integers values, add the values we computed and store them back __m128i res = _mm_load_si128((__m128i*)&tmp[x]); res = _mm_add_epi32(res, res32bitLo); _mm_store_si128((__m128i*)&tmp[x], res); // Repeat the same operation for the next 4 values res = _mm_load_si128((__m128i*)&tmp[x + 4]); res = _mm_add_epi32(res, res32bitHi); _mm_store_si128((__m128i*)&tmp[x + 4], res); // Multiply the next 8 values by the coefficient to get 8 32-bit integers data8 = _mm_unpackhi_epi8(data16, _mm_setzero_si128()); resLo = _mm_mullo_epi16(data8, coeff); resHi = _mm_mulhi_epi16(data8, coeff); res32bitLo = _mm_unpacklo_epi16(resLo, resHi); res32bitHi = _mm_unpackhi_epi16(resLo, resHi); // Load the 4 32-bit integers values, add the values we computed and store them back res = _mm_load_si128((__m128i*)&tmp[x + 8]); res = _mm_add_epi32(res, res32bitLo); _mm_store_si128((__m128i*)&tmp[x + 8], res); // Repeat the same operation for the next 4 values res = _mm_load_si128((__m128i*)&tmp[x + 12]); res = _mm_add_epi32(res, res32bitHi); _mm_store_si128((__m128i*)&tmp[x + 12], res); } for (int x = width16; x < width; x++) { tmp[x] += (int)(in[(k - kOffset) * stride + x] * kernel[k]); } } for (int x = 0; x < width16; x += 16) { // Load 4 32-bit integer values and divide them __m128i accum1 = _mm_load_si128((__m128i*)&tmp[x]); accum1 = accum1 / divisorLibdivide; // Repeat the same operation on the next 4 32-bit integer values __m128i accum2 = _mm_load_si128((__m128i*)&tmp[x + 4]); accum2 = accum2 / divisorLibdivide; // Pack the 8 32-bit integers into 8 16-bit integers accum1 = _mm_packs_epi32(accum1, accum2); // Load 4 32-bit integer values and divide them __m128i accum3 = _mm_load_si128((__m128i*)&tmp[x + 8]); accum3 = accum3 / divisorLibdivide; // Repeat the same operation on the next 4 32-bit integer values __m128i accum4 = _mm_load_si128((__m128i*)&tmp[x + 12]); accum4 = accum4 / divisorLibdivide; // Pack the 8 32-bit integers into 8 16-bit integers accum3 = _mm_packs_epi32(accum3, accum4); // Pack the 16 16-bit integers into 16 8-bit unsigned integers accum1 = _mm_packus_epi16(accum1, accum3); // Store the 16 8-bit unsigned integers _mm_store_si128((__m128i*)&out[x], accum1); } for (int x = width16; x < width; x++) { int accum = tmp[x] / divisor; if (accum > 255) { accum = 255; } else if (accum < 0) { accum = 0; } out[x] = (unsigned char)accum; } } _aligned_free(tmp); } static inline double NormalDist(double sigma, double x) { if (sigma <= 0.0 && x == 0.0) { return 1.0; } else if (sigma <= 0.0) { return 0.0; } else { return 
exp(-(x * x) / (2 * sigma * sigma)) / (sigma * sqrt(2 * M_PI)); } } struct GaussianKernel { short* kernel; int width; int divisor; inline GaussianKernel(double sigma) { width = (int)(sigma * 3.0 + 0.5) | 1; // binary-or with 1 to make sure the number is odd if (width < 3) { width = 3; } kernel = DNew short[width]; kernel[width / 2] = (short)(NormalDist(sigma, 0.0) * 255); divisor = kernel[width / 2]; for (int x = width / 2 - 1; x >= 0; x--) { short val = (short)(NormalDist(sigma, width / 2 - x) * 255 + 0.5); divisor += val * 2; kernel[x] = val; kernel[width - x - 1] = val; } if (divisor == 0) { divisor = 1; } } inline ~GaussianKernel() { delete [] kernel; } };
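One caveat with the vertical pass above: when OpenMP is enabled, the #pragma omp parallel for runs the y loop across threads while tmp is a single buffer allocated before the loop, so every thread zeroes and writes the same accumulator. A per-thread buffer keeps the pass correct; a minimal sketch keeping the same allocation primitives (the SSE2 accumulation body is elided):

void SeparableFilterY_SSE2_mt(unsigned char* src, unsigned char* dst,
                              int width, int height, ptrdiff_t stride,
                              short* kernel, int kernel_size, int divisor)
{
#ifdef _OPENMP
	#pragma omp parallel
#endif
	{
		// One private accumulator per thread instead of one shared buffer.
		int* tmp = (int*)_aligned_malloc(stride * sizeof(int), 16);
#ifdef _OPENMP
		#pragma omp for
#endif
		for (int y = 0; y < height; y++) {
			ZeroMemory(tmp, stride * sizeof(int));
			// ... accumulate the kernel taps into tmp and write row y,
			//     exactly as in SeparableFilterY_SSE2 above ...
		}
		_aligned_free(tmp);
	}
}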
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> // HLSL Change Starts #include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant namespace hlsl { struct UnusualAnnotation; } // HLSL Change Ends namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; 
class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. 
Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. if (getLangOpts().ModulesHideInternalLinkage) return isVisible(Old) || New->isExternallyVisible(); return true; } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; // HLSL Change Begin - pragma pack_matrix. // Add both row/col to identify the default case which no pragma. bool PackMatrixRowMajorPragmaOn = false; // True when \#pragma pack_matrix(row_major) on. bool PackMatrixColMajorPragmaOn = false; // True when \#pragma pack_matrix(column_major) on. // HLSL Change End. enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. 
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics.
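// --------------------------------------------------------------------------
// PragmaStack above models the MSVC-style section pragmas: a current value
// plus a stack of labeled save-points, so "#pragma foo(push, id)" and
// "#pragma foo(pop, id)" can save and restore by label. Act() is only
// declared here; a minimal sketch of the push/pop-by-label discipline it
// implements, assuming std::string values and ignoring source locations:
#if 0
#include <string>
#include <utility>
#include <vector>

struct LabeledValueStack {
  struct Slot { std::string Label; std::string Value; };
  std::vector<Slot> Stack;
  std::string CurrentValue;

  void set(std::string V) { CurrentValue = std::move(V); } // #pragma (name)

  void push(std::string Label) {                           // #pragma (push, id)
    Stack.push_back({std::move(Label), CurrentValue});
  }

  // Pop to the slot with a matching label, or the top slot if Label is
  // empty; an unknown label leaves the stack unchanged.
  void pop(const std::string &Label) {                     // #pragma (pop, id)
    if (Stack.empty())
      return;
    std::size_t I = Stack.size();
    if (!Label.empty()) {
      while (I > 0 && Stack[I - 1].Label != Label)
        --I;
      if (I == 0)
        return; // label not found
    }
    CurrentValue = Stack[I - 1].Value;
    Stack.resize(I - 1);
  }
};
#endif
// --------------------------------------------------------------------------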
ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. 
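// --------------------------------------------------------------------------
// The LazyVector typedefs above (ExtVectorDecls, TentativeDefinitions,
// UnusedFileScopedDecls, DelegatingCtorDecls) all follow one pattern: a
// vector whose contents may partly live in an external AST source (a PCH or
// module file) and are only deserialized on first use. A rough sketch of
// that lazy-load discipline; the Loader callback is a hypothetical stand-in
// for the ExternalSemaSource::Read* methods named in the template arguments.
#if 0
#include <functional>
#include <vector>

template <typename T>
class LazyLoadedVector {
  std::vector<T> Local;                         // entries added this run
  std::function<void(std::vector<T> &)> Loader; // pulls external entries
  bool Loaded = false;

  void materialize() {
    if (Loaded || !Loader)
      return;
    Loaded = true;
    std::vector<T> External;
    Loader(External);
    // Externally stored entries logically precede locally added ones.
    Local.insert(Local.begin(), External.begin(), External.end());
  }

public:
  explicit LazyLoadedVector(std::function<void(std::vector<T> &)> L = {})
      : Loader(std::move(L)) {}

  void push_back(const T &V) { Local.push_back(V); } // no load needed
  typename std::vector<T>::iterator begin() { materialize(); return Local.begin(); }
  typename std::vector<T>::iterator end() { materialize(); return Local.end(); }
};
#endif
// --------------------------------------------------------------------------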
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
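// --------------------------------------------------------------------------
// DelayedDiagnostics above is a save-and-restore stack built from plain
// pointers rather than a container: push() swaps in a new pool and returns
// the old state, and the matching pop call swaps it back. The same
// discipline, reduced to its core; DiagPool is a hypothetical stand-in for
// sema::DelayedDiagnosticPool.
#if 0
#include <cassert>
#include <vector>

struct DiagPool { std::vector<int> DiagIDs; };

class DelayedDiags {
  DiagPool *CurPool = nullptr;

public:
  struct State { DiagPool *SavedPool; };

  bool shouldDelay() const { return CurPool != nullptr; }

  void add(int DiagID) {
    assert(CurPool && "no pool to delay into");
    CurPool->DiagIDs.push_back(DiagID);
  }

  // Enter a scope that collects diagnostics into `pool`.
  State push(DiagPool &pool) {
    State S{CurPool};
    CurPool = &pool;
    return S;
  }

  // Enter a scope where nothing is delayed (cf. pushUndelayed() above).
  State pushUndelayed() {
    State S{CurPool};
    CurPool = nullptr;
    return S;
  }

  // Leave either kind of scope; the caller decides what to do with the
  // diagnostics accumulated in the popped pool.
  void pop(State S) { CurPool = S.SavedPool; }
};
#endif
// --------------------------------------------------------------------------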
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. 
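// --------------------------------------------------------------------------
// SynthesizedFunctionScope and ContextRAII above both use the codebase's
// recurring RAII pairing: the constructor performs the Push calls and the
// destructor the matching Pop calls, so early returns and exceptions cannot
// unbalance the scope stacks. The skeleton of that idiom, with a
// hypothetical ScopeStack standing in for the Sema scope machinery:
#if 0
#include <vector>

struct ScopeStack {
  std::vector<int> Scopes;
  void push(int Id) { Scopes.push_back(Id); }
  void pop() { Scopes.pop_back(); }
};

class ScopedRegion {
  ScopeStack &S;

public:
  ScopedRegion(ScopeStack &S, int Id) : S(S) { S.push(Id); } // enter
  ~ScopedRegion() { S.pop(); }                               // always leave
  ScopedRegion(const ScopedRegion &) = delete;
  ScopedRegion &operator=(const ScopedRegion &) = delete;
};

// Usage: the pop happens on every path out of the block.
//   void f(ScopeStack &S) { ScopedRegion R(S, 42); /* ... */ }
#endif
// --------------------------------------------------------------------------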
ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. 
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
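// --------------------------------------------------------------------------
// SpecialMemberOverloadResult above packs a CXXMethodDecl* and a 2-bit
// result Kind into a single pointer-sized field via llvm::PointerIntPair,
// which stores the integer in the pointer's unused alignment bits. A
// minimal sketch of that trick, assuming the pointee type is aligned to at
// least 1 << IntBits bytes:
#if 0
#include <cassert>
#include <cstdint>

template <typename T, unsigned IntBits>
class TinyPointerIntPair {
  static_assert(alignof(T) >= (1u << IntBits),
                "not enough alignment bits to store the integer");
  static constexpr std::uintptr_t IntMask = (1u << IntBits) - 1;
  std::uintptr_t Bits = 0; // pointer in the high bits, int in the low bits

public:
  T *getPointer() const { return reinterpret_cast<T *>(Bits & ~IntMask); }
  unsigned getInt() const { return unsigned(Bits & IntMask); }
  void setPointer(T *P) {
    assert((reinterpret_cast<std::uintptr_t>(P) & IntMask) == 0 &&
           "pointer is insufficiently aligned");
    Bits = reinterpret_cast<std::uintptr_t>(P) | (Bits & IntMask);
  }
  void setInt(unsigned I) {
    assert(I <= IntMask && "integer does not fit in the alignment bits");
    Bits = (Bits & ~IntMask) | I;
  }
};
#endif
// --------------------------------------------------------------------------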
/// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed; /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; void ReadMethodPool(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// \brief Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema& S) : S(S), OldFPContractState(S.FPFeatures.fp_contract) {} ~FPContractStateRAII() { S.FPFeatures.fp_contract = OldFPContractState; } private: Sema& S; bool OldFPContractState : 1; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// \brief Perform initialization that occurs after the parser has been /// initialized but before it parses anything.
void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// \brief Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. 
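// --------------------------------------------------------------------------
// SemaDiagnosticBuilder above relies on the builder idiom: Diag(...) returns
// a temporary, operator<< attaches arguments to it, and the diagnostic is
// only emitted when the temporary dies at the end of the full expression.
// A minimal sketch of emit-on-destruction, with a hypothetical report()
// standing in for Sema::EmitCurrentDiagnostic and a string buffer standing
// in for the DiagnosticBuilder argument machinery:
#if 0
#include <iostream>
#include <sstream>
#include <string>

class DiagBuilder {
  std::ostringstream Msg;
  bool Active = true;

  static void report(const std::string &Text) { // stand-in emitter
    std::cerr << "error:" << Text << '\n';
  }

public:
  explicit DiagBuilder(const std::string &Format) { Msg << ' ' << Format; }
  DiagBuilder(DiagBuilder &&Other) : Msg(Other.Msg.str()) {
    Other.Active = false; // only the last owner emits
  }
  ~DiagBuilder() {
    if (Active)
      report(Msg.str()); // emission happens here, not at the call site
  }
  template <typename T> DiagBuilder &operator<<(const T &Value) {
    Msg << ' ' << Value;
    return *this;
  }
};

// Usage: the full expression builds the message, the destructor emits it.
//   DiagBuilder("cannot convert") << "int" << "to" << "Foo";
#endif
// --------------------------------------------------------------------------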
ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); unsigned deduceWeakPropertyFromType(QualType T) { if ((getLangOpts().getGC() != LangOptions::NonGC && T.isObjCGCWeak()) || (getLangOpts().ObjCAutoRefCount && T.getObjCLifetime() == Qualifiers::OCL_Weak)) return ObjCDeclSpec::DQ_PR_weak; return 0; } /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. 
/// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); VisibleModuleSet VisibleModules; llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack; Module *CachedFakeTopLevelModule; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity); /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc); bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } bool hasVisibleMergedDefinition(NamedDecl *Def); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // /// List of decls defined in a function prototype. This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
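// --------------------------------------------------------------------------
// BoundTypeDiagnoser above captures a variadic argument pack in a tuple and
// later replays it into the diagnostic builder with an index sequence; the
// `bool Dummy[] = {(DB << ...)...}` array trick sequences the stream
// operations left to right. A standalone sketch of the same expansion over
// std::ostream; std::index_sequence is the C++14 analogue of the
// llvm::index_sequence shim used here.
#if 0
#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>

template <typename... Ts>
class BoundPrinter {
  std::tuple<Ts...> Args;

  template <std::size_t... Is>
  void emit(std::ostream &OS, std::index_sequence<Is...>) const {
    // Expands to one '<<' per tuple element, in order; the leading `true`
    // keeps the array non-empty when the pack is empty.
    bool Dummy[] = {true, (OS << std::get<Is>(Args) << ' ', true)...};
    (void)Dummy;
  }

public:
  explicit BoundPrinter(Ts... As) : Args(std::move(As)...) {}
  void print(std::ostream &OS) const {
    emit(OS, std::index_sequence_for<Ts...>());
  }
};

// Usage:
//   BoundPrinter<const char *, int>("expected", 2).print(std::cout);
#endif
// --------------------------------------------------------------------------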
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; const IdentifierInfo *Keyword; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword), Keyword(Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. 
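// --------------------------------------------------------------------------
// NameClassification above is a discriminated result type: a kind tag plus
// whichever payload that kind needs, constructible only through implicit
// conversions and named factory functions, so ClassifyName() callers can
// return Error(), a type, or an expression through one uniform type. The
// shape of that API, reduced to a single payload kind with a std::string
// standing in for ParsedType:
#if 0
#include <cassert>
#include <string>
#include <utility>

class Classification {
public:
  enum Kind { Unknown, Error, Type };

private:
  Kind K;
  std::string Payload; // stand-in for the ParsedType payload

  explicit Classification(Kind K) : K(K) {}

public:
  // An implicit constructor covers the payload-carrying case...
  Classification(std::string TypeName)
      : K(Type), Payload(std::move(TypeName)) {}

  // ...and named factories cover the payload-free ones.
  static Classification error() { return Classification(Error); }
  static Classification unknown() { return Classification(Unknown); }

  Kind kind() const { return K; }
  const std::string &typeName() const {
    assert(K == Type && "not a type"); // mirrors the asserting getters above
    return Payload;
  }
};
#endif
// --------------------------------------------------------------------------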
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); // HLSL Change Starts // This enumeration is used to determine whether a variable declaration // should shadow a prior declaration rather than merging. 
enum ShadowMergeState { ShadowMergeState_Disallowed, // shadowing is not allowed ShadowMergeState_Possible, // shadowing is possible (but may not occur) ShadowMergeState_Effective // the declaration should shadow a prior one }; // HLSL Change Ends NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition(FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, bool NeedDefinition, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, 
Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. Return true if a new attribute was added. 
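/// A hypothetical call-site sketch: the merge* functions below return the
/// newly created attribute, or null when nothing should be added, so a
/// typical caller attaches the result only when it is non-null (variable
/// names here are assumed for illustration):
/// \code
///   if (SectionAttr *NewAttr =
///           mergeSectionAttr(D, Range, Name, AttrSpellingListIndex))
///     D->addAttr(NewAttr);
/// \endcode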
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction
};

OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);

/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);

ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion);

bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
}; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. 
// TODO: make this is a typesafe union. typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all template and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); /// Check the 
enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. 
LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr. 
void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
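// A minimal sketch of the intended use (hypothetical variable names): once a
// declaration has been built from a declarator, the accumulated attributes
// are applied in a single pass through the dispatcher:
//   ProcessDeclAttributes(S, NewDecl, D);  // S: current scope, D: declarator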
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance);

public:
/// \brief Returns instance or factory methods in the global method pool for
/// the given selector. If no such method, or only one method, is found,
/// returns false; otherwise, returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass);

private:
/// \brief Returns the method whose selector best matches the given argument
/// list, or nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance);

/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() { return E; }

  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change

StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// \brief An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S): S(S) {
    S.ActOnStartOfCompoundStmt();
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
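/// A minimal usage sketch (hypothetical), in the spirit of CompoundScopeRAII
/// above; disable() keeps the function scope from being popped in the
/// destructor, e.g. when it has already been handed off elsewhere:
/// \code
///   FunctionScopeRAII FuncScope(SemaRef);
///   // ... work that uses the current function scope ...
///   FuncScope.disable();  // success path: do not pop in the destructor
/// \endcode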
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope 
*S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
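// A common usage sketch for the evaluation-context entry points declared
// below (Unevaluated is assumed to be an ExpressionEvaluationContext
// enumerator; the push/pop pattern, not the exact spelling, is the point):
//   PushExpressionEvaluationContext(Unevaluated);
//   ExprResult Operand = /* ... parse the unevaluated operand ... */;
//   PopExpressionEvaluationContext();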
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);

void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
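/// A sketch of the "check only" mode described above (variable names are
/// hypothetical; the signature is the one declared below):
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = tryCaptureVariable(Var, Loc, TryCapture_Implicit,
///                                     SourceLocation(),
///                                     /*BuildAndDiagnose=*/false,
///                                     CaptureType, DeclRefType,
///                                     /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode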
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult 
ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
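//
// A minimal illustration (hypothetical types) of the recovery this enables:
//
//   struct Data { int field; };
//   struct Handle { Data *operator->(); };
//   int use(Handle h) { return h.field; } // error: 'Handle' has no member
//                                         // 'field'; retried as 'h->field'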
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
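///
/// For instance (a sketch; requires AltiVec or OpenCL vector support):
/// \code
///   vector int v = (vector int)(1, 2, 3, 4); // AltiVec-style vector literal
/// \endcode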
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred.
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); // HLSL Change Starts //===---------------------------- HLSL Features -------------------------===// /// cbuffer/tbuffer llvm::SmallVector<Decl*, 1> HLSLBuffers; Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc, IdentifierInfo *Ident, SourceLocation IdentLoc, std::vector<hlsl::UnusualAnnotation *>& BufferAttributes, SourceLocation LBrace); void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace); Decl* getActiveHLSLBuffer() const; void ActOnStartHLSLBufferView(); bool IsOnHLSLBufferView(); Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc, DeclGroupPtrTy &dcl, bool iscbuf); // HLSL Change Ends //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
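///
/// For example, a braced initializer such as the following instantiates
/// std::initializer_list<int> behind the scenes (illustrative client code):
/// \code
///   #include <vector>
///   std::vector<int> v = {1, 2, 3};
/// \endcode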
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. 
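///
/// For example (illustrative only), the implicit default constructor of C
/// below collects the specifications of the member constructors it must
/// invoke, yielding throw(int):
/// \code
///   struct A { A() noexcept; };
///   struct B { B() throw(int); };
///   struct C { A a; B b; };
/// \endcode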
class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. 
/// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the exception specification of a /// static member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression.
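///
/// Illustrative source that produces such a temporary (hypothetical type):
/// \code
///   struct Guard { ~Guard(); }; // non-trivial destructor
///   Guard make();
///   void f() { make(); } // result is bound so ~Guard() runs at the ';'
/// \endcode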
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers placed on '*this'.
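///
/// A sketch of typical usage (assuming a CXXRecordDecl *Record and a Sema
/// reference 'SemaRef' in scope, with an unqualified '*this'):
/// \code
///   Sema::CXXThisScopeRAII ThisScope(SemaRef, Record, /*CXXThisTypeQuals=*/0);
///   // 'this' may now be used, e.g. while parsing a trailing return type
/// \endcode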
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns 'true' on failure, 'false' on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression.
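///
/// Forms this covers include, for example:
/// \code
///   new int(5);         // scalar with initializer
///   new (buf) Widget(); // placement form ('buf' and 'Widget' hypothetical)
///   ::new double[n];    // global operator new[], array form ('n' hypothetical)
/// \endcode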
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions.
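///
/// For example (a sketch using one of the supported expression traits;
/// availability of the trait depends on compiler support):
/// \code
///   void f(int x) { bool b = __is_lvalue_expr(x); (void)b; }
/// \endcode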
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
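///
/// For example (Microsoft extension, illustrative):
/// \code
///   struct Base { void f(); };
///   struct Derived : Base {
///     void f() { __super::f(); } // '__super::' names the base class scope
///   };
/// \endcode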
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, no error messages are emitted. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc The location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
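///
/// For example, the marked specifier below takes this path (illustrative):
/// \code
///   template <typename T> struct Outer {
///     template <typename U> struct Inner { typedef U type; };
///   };
///   template <typename T>
///   typename Outer<T>::template Inner<int>::type g(); // 'template Inner<int>::'
/// \endcode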
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference.
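///
/// For example (C++14 init-captures, illustrative):
/// \code
///   int seed = 1;
///   auto f = [n = seed + 1] { return n; }; // by-value init-capture
///   auto g = [&r = seed] { ++r; };         // by-reference init-capture
/// \endcode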
QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const 
DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXMemberDefaultArgs(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
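  // The derived-to-base machinery above and the path-building routine below
  // correspond to ordinary source-level conversions such as the following
  // (a minimal sketch; B and D are hypothetical types):
  //
  //   struct B { };
  //   struct D : B { };
  //   D d;
  //   B *b = &d;   // IsDerivedFrom(D, B) holds; the base path is D -> B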
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType 
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
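  ///
  /// For instance, the parameter list of a plain class template is checked
  /// in the \c TPC_ClassTemplate context (a minimal sketch):
  /// \code
  /// template<typename T> struct X;   // checked with TPC_ClassTemplate
  /// \endcode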
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg 
  TemplateParameterLists, Declarator &D);

  bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
                                        TemplateSpecializationKind NewTSK,
                                        NamedDecl *PrevDecl,
                                        TemplateSpecializationKind PrevTSK,
                                        SourceLocation PrevPtOfInstantiation,
                                        bool &SuppressNew);

  bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
                    const TemplateArgumentListInfo &ExplicitTemplateArgs,
                                                    LookupResult &Previous);

  bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
                         TemplateArgumentListInfo *ExplicitTemplateArgs,
                                           LookupResult &Previous);
  bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);

  DeclResult ActOnExplicitInstantiation(Scope *S,
                                        SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        unsigned TagSpec,
                                        SourceLocation KWLoc,
                                        const CXXScopeSpec &SS,
                                        TemplateTy Template,
                                        SourceLocation TemplateNameLoc,
                                        SourceLocation LAngleLoc,
                                        ASTTemplateArgsPtr TemplateArgs,
                                        SourceLocation RAngleLoc,
                                        AttributeList *Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S,
                                        SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        unsigned TagSpec,
                                        SourceLocation KWLoc,
                                        CXXScopeSpec &SS,
                                        IdentifierInfo *Name,
                                        SourceLocation NameLoc,
                                        AttributeList *Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S,
                                        SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        Declarator &D);

  TemplateArgumentLoc
  SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                          SourceLocation TemplateLoc,
                                          SourceLocation RAngleLoc,
                                          Decl *Param,
                                          SmallVectorImpl<TemplateArgument>
                                            &Converted,
                                          bool &HasDefaultArg);

  /// \brief Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// \brief The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// \brief The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// \brief The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param,
                             TemplateArgumentLoc &Arg,
                             NamedDecl *Template,
                             SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// \brief Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
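  ///
  /// As a sketch of the conversion this performs (\c Array is an
  /// illustrative name, not part of the API), given
  /// \code
  /// template<typename T, int N = 4> struct Array;
  /// Array<int> a;
  /// \endcode
  /// the written argument list <int> is checked against the parameters and
  /// extended with the defaulted argument, yielding the converted list
  /// <int, 4>.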
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  TypeResult
  ActOnTypenameType(Scope *S,
                    SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS,
                    SourceLocation TemplateLoc,
                    TemplateTy TemplateName,
                    SourceLocation TemplateNameLoc,
                    SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc);

  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  bool RebuildTemplateParamsInCurrentInstantiation(
                                                TemplateParameterList *Params);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);

  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in this
  /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// \brief The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// \brief An arbitrary expression.
    UPPC_Expression = 0,

    /// \brief The base type of a class type.
    UPPC_BaseType,

    /// \brief The type of an arbitrary declaration.
    UPPC_DeclarationType,

    /// \brief The type of a data member.
    UPPC_DataMemberType,

    /// \brief The size of a bit-field.
    UPPC_BitFieldWidth,

    /// \brief The expression in a static assertion.
    UPPC_StaticAssertExpression,

    /// \brief The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,

    /// \brief The enumerator value.
    UPPC_EnumeratorValue,

    /// \brief A using declaration.
    UPPC_UsingDeclaration,

    /// \brief A friend declaration.
    UPPC_FriendDeclaration,

    /// \brief A declaration qualifier.
    UPPC_DeclarationQualifier,

    /// \brief An initializer.
    UPPC_Initializer,

    /// \brief A default argument.
    UPPC_DefaultArgument,

    /// \brief The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,

    /// \brief The type of an exception.
    UPPC_ExceptionType,

    /// \brief Partial specialization.
    UPPC_PartialSpecialization,

    /// \brief Microsoft __if_exists.
    UPPC_IfExists,

    /// \brief Microsoft __if_not_exists.
    UPPC_IfNotExists,

    /// \brief Lambda expression.
    UPPC_Lambda,

    /// \brief Block expression.
    UPPC_Block
  };

  /// \brief Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                  ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// \brief If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
    TDK_Incomplete,
    /// \brief Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// \brief Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// \brief Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// \brief A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// \brief When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// \brief When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// \brief The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// \brief The arguments included an overloaded function name that could
    /// not be resolved to a suitable function.
    TDK_FailedOverloadResolution,
    /// \brief Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
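  ///
  /// For example (a minimal sketch), for the call in
  /// \code
  /// template<typename T> void g(const T &);
  /// g(42);
  /// \endcode
  /// deduction records the original parameter type 'const T &', argument
  /// index 0, and the original argument type 'int'.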
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
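  ///
  /// For instance (a minimal sketch):
  /// \code
  /// auto x = 42;          // DAR_Succeeded, deduced type 'int'
  /// auto z = { 1, 2.0 };  // inconsistent element types: deduction fails
  /// \endcode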
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. 
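      ///
      /// For instance (a minimal sketch), matching \c S<int*> against the
      /// partial specialization below substitutes the deduced argument
      /// \c T = \c int:
      /// \code
      /// template<typename T> struct S { };
      /// template<typename T> struct S<T*> { };
      /// S<int*> s;
      /// \endcode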
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation
    } Kind;

    /// \brief The point of instantiation within the source code.
    SourceLocation PointOfInstantiation;

    /// \brief The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// \brief The entity that is being instantiated.
    Decl *Entity;

    /// \brief The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    /// \brief The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// \brief The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// \brief The source range that covers the construct that causes
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    ActiveTemplateInstantiation()
      : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
        TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}

    /// \brief Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;

    friend bool operator==(const ActiveTemplateInstantiation &X,
                           const ActiveTemplateInstantiation &Y) {
      if (X.Kind != Y.Kind)
        return false;

      if (X.Entity != Y.Entity)
        return false;

      switch (X.Kind) {
      case TemplateInstantiation:
      case ExceptionSpecInstantiation:
        return true;

      case PriorTemplateArgumentSubstitution:
      case DefaultTemplateArgumentChecking:
        return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;

      case DefaultTemplateArgumentInstantiation:
      case ExplicitTemplateArgumentSubstitution:
      case DeducedTemplateArgumentSubstitution:
      case DefaultFunctionArgumentInstantiation:
        return X.TemplateArgs == Y.TemplateArgs;
      }

      llvm_unreachable("Invalid InstantiationKind!");
    }

    friend bool operator!=(const ActiveTemplateInstantiation &X,
                           const ActiveTemplateInstantiation &Y) {
      return !(X == Y);
    }
  };

  /// \brief List of active template instantiations.
  ///
  /// This vector is treated as a stack. As one template instantiation
  /// requires another template instantiation, additional
  /// instantiations are pushed onto the stack up to a
  /// user-configurable limit LangOptions::InstantiationDepth.
  SmallVector<ActiveTemplateInstantiation, 16>
    ActiveTemplateInstantiations;

  /// \brief Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;

  /// \brief Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// \brief Get the set of additional modules that should be checked during
  /// name lookup.
  /// A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// \brief Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// \brief The number of ActiveTemplateInstantiation entries in
  /// \c ActiveTemplateInstantiations that are not actual instantiations and,
  /// therefore, should not be counted as part of the instantiation depth.
  unsigned NonInstantiationEntries;

  /// \brief The last template from which a template instantiation
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant template
  /// instantiation backtraces when there are multiple errors in the
  /// same instantiation. FIXME: Does this belong in Sema? It's tough
  /// to implement it anywhere else.
  ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;

  /// \brief The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// \brief RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionRAII;

  /// \brief The stack of call expressions undergoing template instantiation.
  ///
  /// The top of this stack is used by a fixit instantiating unresolved
  /// function calls to fix the AST to match the textual change it prints.
  SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;

  /// \brief For each declaration that involved template argument deduction,
  /// the set of diagnostics that were suppressed during that template
  /// argument deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// \brief A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and evaluates true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// \brief Note that we are instantiating a class template,
    /// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
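    ///
    /// The usual pattern inside Sema is roughly (a minimal sketch; the
    /// surrounding names are illustrative):
    /// \code
    /// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
    /// if (Inst.isInvalid())
    ///   return true; // depth exceeded; the error was already produced
    /// \endcode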
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(), sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. 
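  ///
  /// A minimal sketch of the kind of code that requires this queue:
  /// \code
  /// template<typename T> void f() {
  ///   struct Local { void g() { } };  // Local::g must be instantiated in
  ///   Local l;                        // the scope of f's instantiation
  ///   l.g();
  /// }
  /// \endcode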
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
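/// For example (editor's sketch; `S`, `Args`, and `TemplateArgs` are assumed
/// to be supplied by the caller):
/// \code
///   SmallVector<Expr *, 8> Outputs;
///   if (S.SubstExprs(Args.data(), Args.size(), /*IsCall=*/true,
///                    TemplateArgs, Outputs))
///     return true; // substitution failed
/// \endcode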
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed /// \param CD The semantic container for the property /// \param redeclaredProperty Declaration for property if redeclared /// in class extension. /// \param lexicalDC Container for redeclaredProperty. 
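/// A hedged usage sketch (editor's addition; `S`, `Prop`, and `Container` are
/// assumed): for a property that is not redeclared in a class extension, the
/// two trailing parameters keep their null defaults:
/// \code
///   S.ProcessPropertyDecl(Prop, Container);
/// \endcode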
void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
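/// For instance (editor's sketch), a handler for '#pragma options
/// align=mac68k' would invoke:
/// \code
///   Actions.ActOnPragmaOptionsAlign(Sema::POAK_Mac68k, PragmaLoc);
/// \endcode
/// where `Actions` and `PragmaLoc` are assumed to come from the parser.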
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name, Expr *Alignment, SourceLocation PragmaLoc, SourceLocation LParenLoc, SourceLocation RParenLoc);

/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName);

/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName);

/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
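/// For example (editor's sketch), '#pragma redefine_extname oldname newname'
/// maps onto:
/// \code
///   Actions.ActOnPragmaRedefineExtname(OldII, NewII, PragmaLoc,
///                                      OldNameLoc, NewNameLoc);
/// \endcode
/// (`Actions`, the identifiers, and the locations are assumed parser-supplied).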
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT.
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; }

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
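/// E.g. (editor's sketch) for a CUDA '__launch_bounds__(MaxThreads, MinBlocks)'
/// attribute on function `FD`:
/// \code
///   S.AddLaunchBoundsAttr(AttrRange, FD, MaxThreadsExpr, MinBlocksExpr,
///                         SpellingIdx);
/// \endcode
/// (all five arguments are assumed to be provided by the attribute handler).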
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex);

// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind);

public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);

/// \brief Check if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op);

/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList);

/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
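/// An illustrative call from the directive parser (editor's sketch; the
/// clause list, loop statement, and locations are assumed to have been
/// collected while parsing):
/// \code
///   StmtResult Res = Actions.ActOnOpenMPForDirective(
///       Clauses, Body, StartLoc, EndLoc, VarsWithImplicitDSA);
/// \endcode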
StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. 
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. 
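/// For example (editor's sketch), 'schedule(dynamic, 4)' would arrive here
/// roughly as:
/// \code
///   OMPClause *C = Actions.ActOnOpenMPScheduleClause(
///       OMPC_SCHEDULE_dynamic, ChunkExpr, StartLoc, LParenLoc, KindLoc,
///       CommaLoc, EndLoc);
/// \endcode
/// (`ChunkExpr` holds the literal 4; all names are assumed parser-supplied).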
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, SourceLocation DepLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. 
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. 
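// For instance (editor's sketch; `E` is an assumed argument expression and
// `CT` an assumed VariadicCallType), callers typically switch over the result:
//   switch (isValidVarArgType(E->getType())) {
//   case VAK_Valid:        break;  // fine in all modes
//   case VAK_ValidInCXX11: break;  // OK from C++11 on
//   default:                       // diagnose via checkVariadicArgument()
//     checkVariadicArgument(E, CT); break;
//   }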
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright; it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false);

// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
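// (Editor's sketch) callers usually try the transparent-union check first and
// fall back to the ordinary path when it does not apply:
//   AssignConvertType CT =
//       CheckTransparentUnionArgumentConstraints(ParamTy, RHS);
//   if (CT == Incompatible)
//     CT = CheckSingleAssignmentConstraints(ParamTy, RHS);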
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
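// E.g. (editor's sketch): for a compound assignment 'a += b' whose operands
// were converted to `CompTy`:
//   QualType ResTy = CheckAssignmentOperands(LHSExpr, RHS, Loc, CompTy);
// while a simple 'a = b' passes a null QualType() as the compound type.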
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
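/// A hedged sketch (editor's addition) of the typical call from message-send
/// checking, with all arguments assumed to be in scope:
/// \code
///   QualType RetTy = getMessageSendResultType(ReceiverType, Method,
///                                             isClassMessage, isSuperMessage);
/// \endcode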
QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. 
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \name Code completion
//@{

/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation.
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// \brief Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// \brief Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// \brief Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// \brief Code completion occurs where only a type is permitted.
  PCC_Type,
  /// \brief Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// \brief Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); // HLSL Change Starts - checking array subscript access to vector or matrix member void CheckHLSLArrayAccess(const Expr *expr); // HLSL Change ends void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinCpuSupports(CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E);

/// \brief Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const Expr * const *ExprArgs);

/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, TypeSourceInfo* TInfo,
                      bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ?
         OriginalLexicalContext : CurContext;
}

AvailabilityResult getCurContextAvailability() const;

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// \brief To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view it as an extra argument.
  return NumArgs > NumParams;
}
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
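// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Sema.h): EnterExpressionEvaluationContext
// above is a plain RAII guard -- its constructor pushes a new evaluation
// context and its destructor pops it, so the previous context is restored on
// every exit path, including early returns and exceptions. A minimal
// standalone analogue of the same idiom; ContextStack and EvalContext are
// hypothetical stand-in types, not Clang's.
// ---------------------------------------------------------------------------
#include <cassert>
#include <vector>

enum class EvalContext { Unevaluated, PotentiallyEvaluated };

class ContextStack {
  std::vector<EvalContext> Stack{EvalContext::PotentiallyEvaluated};
public:
  void push(EvalContext C) { Stack.push_back(C); }
  void pop() { assert(Stack.size() > 1); Stack.pop_back(); }
  EvalContext current() const { return Stack.back(); }
};

// Push on construction, pop on destruction -- mirrors the guard above.
class EnterContext {
  ContextStack &S;
public:
  EnterContext(ContextStack &S, EvalContext C) : S(S) { S.push(C); }
  ~EnterContext() { S.pop(); }
};

int main() {
  ContextStack S;
  {
    EnterContext Guard(S, EvalContext::Unevaluated); // e.g. inside sizeof(...)
    assert(S.current() == EvalContext::Unevaluated);
  } // Guard destroyed here: the outer context is restored automatically.
  assert(S.current() == EvalContext::PotentiallyEvaluated);
  return 0;
}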
genprimes.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>

/* Generates the primes up to n with an OpenMP-parallel sieve and writes
 * "rank, prime, gap" lines to n.sol. */
int main(int argc, char *argv[])
{
    if (argc > 3) {
        printf("There are too many arguments!\n");
        exit(1);
    } else if (argc < 3) {
        printf("There are not enough arguments!\n");
        exit(1);
    }

    FILE *f;
    int n = atoi(argv[1]);
    int numOfThreads = atoi(argv[2]);
    double tstart = 0.0;
    double ttaken;
    int stop = (n + 1) / 2;  /* a prime above n/2 has no multiple <= n left to cross off */
    int rank = 1;
    printf("N:%d and numOfThreads:%d.\n", n, numOfThreads);

    /* VLAs keep the original structure; very large n may overflow the stack. */
    int numberArr[n + 1];    /* 1 = still a prime candidate, 0 = crossed off */
    int primes[n];
    int c[n];                /* c[k] = gap between primes[k] and primes[k-1] */
    primes[0] = 2;

    tstart = omp_get_wtime();

    /* Initialize all candidates to 1. (The original tested the uninitialized
     * array before writing it, which is undefined behavior.) */
    #pragma omp parallel for num_threads(numOfThreads)
    for (int i = 2; i < n + 1; i++) {
        numberArr[i] = 1;
    }

    for (int i = 2; i < n + 1; i++) {
        if (numberArr[i] == 1) {
            primes[rank] = i;
            c[rank] = primes[rank] - primes[rank - 1];
            rank++;
            if (i <= stop) {
                /* Cross off every multiple of the newly found prime. */
                #pragma omp parallel for num_threads(numOfThreads)
                for (int j = i + 1; j < n + 1; j++) {
                    if (j % i == 0)
                        numberArr[j] = 0;
                }
            }
        }
    }
    ttaken = omp_get_wtime() - tstart;
    printf("Time taken for the main part %f.\n", ttaken);

    char output[100];
    sprintf(output, "%d.sol", n);
    f = fopen(output, "w");
    if (!f) {
        printf("Cannot create the file %s.\n", output);
        exit(1);
    }
    for (int k = 1; k < rank; k++) {
        fprintf(f, "%d, %d, %d\n", k, primes[k], c[k]);
    }
    fclose(f);  /* was missing: close the output file */
    return 0;
}
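/*
 * Build-and-run sketch (an assumption: GCC with OpenMP support; other
 * compilers need their own OpenMP flag):
 *
 *   gcc -O2 -fopenmp genprimes.c -o genprimes -lm
 *   ./genprimes 20 4
 *
 * This writes 20.sol with one line per prime, formatted as
 * "<rank>, <prime>, <gap to previous prime>"; the first gap is 0 because
 * primes[0] is seeded with 2 before the sieve runs:
 *
 *   1, 2, 0
 *   2, 3, 1
 *   3, 5, 2
 *   4, 7, 2
 *   ...
 */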
DataVectorOps.h
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

#ifndef __ESCRIPT_DATAVECTOROPS_H__
#define __ESCRIPT_DATAVECTOROPS_H__

#include "DataAbstract.h"
#include "DataException.h"
#include "ArrayOps.h"
#include "LapackInverseHelper.h"
#include "DataTagged.h"

#include <complex>

/**
\file DataVectorOps.h
\brief Describes binary operations performed on DataVector.

For operations on DataReady see BinaryDataReadyOp.h.
For operations on double* see ArrayOps.h.
*/

namespace escript
{

/**
In order to properly identify the datapoints, in most cases the vector, shape
and offset of the point must all be supplied. Note that "vector" in this
context refers to a data vector storing datapoints, not a mathematical vector.
(However, datapoints within the data vector could represent scalars, vectors,
matrices, ...).
*/

/**
\brief Perform a matrix multiply of the given views.

NB: this only multiplies the two given datapoints; it would need to be called
over all datapoints to multiply the entire Data objects involved.

\param left,right - vectors containing the datapoints
\param leftShape,rightShape - shapes of datapoints in the vectors
\param leftOffset,rightOffset - beginnings of datapoints in the vectors
\param result - Vector to store the resulting datapoint in
\param resultShape - expected shape of the resulting datapoint
*/
ESCRIPT_DLL_API
void matMult(const DataTypes::RealVectorType& left,
             const DataTypes::ShapeType& leftShape,
             DataTypes::RealVectorType::size_type leftOffset,
             const DataTypes::RealVectorType& right,
             const DataTypes::ShapeType& rightShape,
             DataTypes::RealVectorType::size_type rightOffset,
             DataTypes::RealVectorType& result,
             const DataTypes::ShapeType& resultShape);
// Hmmmm why is there no offset for the result??

/**
\brief Determine the shape of the result array for a matrix multiplication
of the given views.
\param left,right - shapes of the left and right matrices
\return the shape of the matrix which would result from multiplying left and right
*/
ESCRIPT_DLL_API
DataTypes::ShapeType determineResultShape(const DataTypes::ShapeType& left,
                                          const DataTypes::ShapeType& right);

/**
\brief computes a symmetric matrix from your square matrix A: (A + transpose(A)) / 2

\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
template<typename VEC>
inline
void symmetric(const VEC& in,
               const DataTypes::ShapeType& inShape,
               typename VEC::size_type inOffset,
               VEC& ev,
               const DataTypes::ShapeType& evShape,
               typename VEC::size_type evOffset)
{
   if (DataTypes::getRank(inShape) == 2) {
     int i0, i1;
     int s0=inShape[0];
     int s1=inShape[1];
     for (i0=0; i0<s0; i0++) {
       for (i1=0; i1<s1; i1++) {
         ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] =
           (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1)] +
            in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]) / 2.0;
       }
     }
   }
   else if (DataTypes::getRank(inShape) == 4) {
     int i0, i1, i2, i3;
     int s0=inShape[0];
     int s1=inShape[1];
     int s2=inShape[2];
     int s3=inShape[3];
     for (i0=0; i0<s0; i0++) {
       for (i1=0; i1<s1; i1++) {
         for (i2=0; i2<s2; i2++) {
           for (i3=0; i3<s3; i3++) {
             ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] =
               (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i3)] +
                in[inOffset+DataTypes::getRelIndex(inShape,i2,i3,i0,i1)]) / 2.0;
           }
         }
       }
     }
   }
}

/**
\brief computes an antisymmetric matrix from your square matrix A: (A - transpose(A)) / 2

\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
template<typename VEC>
inline
void antisymmetric(const VEC& in,
                   const DataTypes::ShapeType& inShape,
                   typename VEC::size_type inOffset,
                   VEC& ev,
                   const DataTypes::ShapeType& evShape,
                   typename VEC::size_type evOffset)
{
   if (DataTypes::getRank(inShape) == 2) {
     int i0, i1;
     int s0=inShape[0];
     int s1=inShape[1];
     for (i0=0; i0<s0; i0++) {
       for (i1=0; i1<s1; i1++) {
         ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] =
           (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1)] -
            in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]) / 2.0;
       }
     }
   }
   else if (DataTypes::getRank(inShape) == 4) {
     int i0, i1, i2, i3;
     int s0=inShape[0];
     int s1=inShape[1];
     int s2=inShape[2];
     int s3=inShape[3];
     for (i0=0; i0<s0; i0++) {
       for (i1=0; i1<s1; i1++) {
         for (i2=0; i2<s2; i2++) {
           for (i3=0; i3<s3; i3++) {
             ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] =
               (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i3)] -
                in[inOffset+DataTypes::getRelIndex(inShape,i2,i3,i0,i1)]) / 2.0;
           }
         }
       }
     }
   }
}

/**
\brief computes a Hermitian matrix from your square matrix A: (A + adjoint(A)) / 2

\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
void hermitian(const DataTypes::CplxVectorType& in,
               const DataTypes::ShapeType& inShape,
               DataTypes::CplxVectorType::size_type inOffset,
               DataTypes::CplxVectorType& ev,
               const DataTypes::ShapeType& evShape,
               DataTypes::CplxVectorType::size_type evOffset);

/**
\brief computes an anti-Hermitian matrix from your square matrix A: (A - adjoint(A)) / 2

\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
void antihermitian(const DataTypes::CplxVectorType& in,
                   const DataTypes::ShapeType& inShape,
                   typename DataTypes::CplxVectorType::size_type inOffset,
                   DataTypes::CplxVectorType& ev,
                   const DataTypes::ShapeType& evShape,
                   typename DataTypes::CplxVectorType::size_type evOffset);

/**
\brief computes the trace of a matrix

\param in - vector containing the input matrix
\param inShape - shape of the input matrix
\param inOffset - the beginning of the input matrix within the vector "in"
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing the output matrix in vector ev
\param axis_offset
*/
template <class VEC>
inline
void trace(const VEC& in,
           const DataTypes::ShapeType& inShape,
           typename VEC::size_type inOffset,
           VEC& ev,
           const DataTypes::ShapeType& evShape,
           typename VEC::size_type evOffset,
           int axis_offset)
{
   for (int j=0;j<DataTypes::noValues(evShape);++j) {
     ev[evOffset+j]=0;
   }
   if (DataTypes::getRank(inShape) == 2) {
     int s0=inShape[0]; // Python wrapper limits to square matrix
     int i;
     for (i=0; i<s0; i++) {
       ev[evOffset/*+DataTypes::getRelIndex(evShape)*/] +=
         in[inOffset+DataTypes::getRelIndex(inShape,i,i)];
     }
   }
   else if (DataTypes::getRank(inShape) == 3) {
     if (axis_offset==0) {
       int s0=inShape[0];
       int s2=inShape[2];
       int i0, i2;
       for (i0=0; i0<s0; i0++) {
         for (i2=0; i2<s2; i2++) {
           ev[evOffset+DataTypes::getRelIndex(evShape,i2)] +=
             in[inOffset+DataTypes::getRelIndex(inShape,i0,i0,i2)];
         }
       }
     }
     else if (axis_offset==1) {
       int s0=inShape[0];
       int s1=inShape[1];
       int i0, i1;
       for (i0=0; i0<s0; i0++) {
         for (i1=0; i1<s1; i1++) {
           ev[evOffset+DataTypes::getRelIndex(evShape,i0)] +=
             in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i1)];
         }
       }
     }
   }
   else if (DataTypes::getRank(inShape) == 4) {
     if (axis_offset==0) {
       int s0=inShape[0];
       int s2=inShape[2];
       int s3=inShape[3];
       int i0, i2, i3;
       for (i0=0; i0<s0; i0++) {
         for (i2=0; i2<s2; i2++) {
           for (i3=0; i3<s3; i3++) {
             ev[evOffset+DataTypes::getRelIndex(evShape,i2,i3)] +=
               in[inOffset+DataTypes::getRelIndex(inShape,i0,i0,i2,i3)];
           }
         }
       }
     }
     else if (axis_offset==1) {
       int s0=inShape[0];
       int s1=inShape[1];
       int s3=inShape[3];
       int i0, i1, i3;
       for (i0=0; i0<s0; i0++) {
         for (i1=0; i1<s1; i1++) {
           for (i3=0; i3<s3; i3++) {
             ev[evOffset+DataTypes::getRelIndex(evShape,i0,i3)] +=
               in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i1,i3)];
           }
         }
       }
     }
     else if (axis_offset==2) {
       int s0=inShape[0];
       int s1=inShape[1];
       int s2=inShape[2];
       int i0, i1, i2;
       for (i0=0; i0<s0; i0++) {
         for (i1=0; i1<s1; i1++) {
           for (i2=0; i2<s2; i2++) {
             ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] +=
               in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i2)];
           }
         }
       }
     }
   }
}

/**
\brief Transpose each data point of this Data object around the given axis.
\param in - vector containing the input matrix \param inShape - shape of the input matrix \param inOffset - the beginning of the input matrix within the vector "in" \param ev - vector to store the output matrix \param evShape - expected shape of the output matrix \param evOffset - starting location for storing the output matrix in vector ev \param axis_offset */ ESCRIPT_DLL_API template <class VEC> inline void transpose(const VEC& in, const DataTypes::ShapeType& inShape, typename VEC::size_type inOffset, VEC& ev, const DataTypes::ShapeType& evShape, typename VEC::size_type evOffset, int axis_offset) { int inRank=DataTypes::getRank(inShape); if ( inRank== 4) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int s3=evShape[3]; int i0, i1, i2, i3; if (axis_offset==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i3,i0,i1,i2)]; } } } } } else if (axis_offset==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i3,i0,i1)]; } } } } } else if (axis_offset==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i2,i3,i0)]; } } } } } else { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i3)]; } } } } } } else if (inRank == 3) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int i0, i1, i2; if (axis_offset==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i0,i1)]; } } } } else if (axis_offset==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i2,i0)]; } } } } else { // Copy the matrix unchanged for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2)]; } } } } } else if (inRank == 2) { int s0=evShape[0]; int s1=evShape[1]; int i0, i1; if (axis_offset==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]; } } } else { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1)]; } } } } else if (inRank == 1) { int s0=evShape[0]; int i0; for (i0=0; i0<s0; i0++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0)] = in[inOffset+DataTypes::getRelIndex(inShape,i0)]; } } else if (inRank == 0) { ev[evOffset/*+DataTypes::getRelIndex(evShape,)*/] = in[inOffset/*+DataTypes::getRelIndex(inShape,)*/]; } else { throw DataException("Error - DataArrayView::transpose can only be calculated for rank 0, 1, 2, 3 or 4 objects."); } } /** \brief swaps the components axis0 and axis1. 
\param in - vector containing the input matrix \param inShape - shape of the input matrix \param inOffset - the beginning of the input matrix within the vector "in" \param ev - vector to store the output matrix \param evShape - expected shape of the output matrix \param evOffset - starting location for storing the output matrix in vector ev \param axis0 - axis index \param axis1 - axis index */ ESCRIPT_DLL_API template <class VEC> inline void swapaxes(const VEC& in, const DataTypes::ShapeType& inShape, typename VEC::size_type inOffset, VEC& ev, const DataTypes::ShapeType& evShape, typename VEC::size_type evOffset, int axis0, int axis1) { int inRank=DataTypes::getRank(inShape); if (inRank == 4) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int s3=evShape[3]; int i0, i1, i2, i3; if (axis0==0) { if (axis1==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0,i2,i3)]; } } } } } else if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i1,i0,i3)]; } } } } } else if (axis1==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i3,i1,i2,i0)]; } } } } } } else if (axis0==1) { if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i2,i1,i3)]; } } } } } else if (axis1==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i3,i2,i1)]; } } } } } } else if (axis0==2) { if (axis1==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i3,i2)]; } } } } } } } else if ( inRank == 3) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int i0, i1, i2; if (axis0==0) { if (axis1==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0,i2)]; } } } } else if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i1,i0)]; } } } } } else if (axis0==1) { if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i2,i1)]; } } } } } } else if ( inRank == 2) { int s0=evShape[0]; int s1=evShape[1]; int i0, i1; if (axis0==0) { if (axis1==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]; } } } } } else { throw DataException("Error - DataArrayView::swapaxes can only be calculated for rank 2, 3 or 4 objects."); } } /** \brief 
solves a local eigenvalue problem \param in - vector containing the input matrix \param inShape - shape of the input matrix \param inOffset - the beginning of the input matrix within the vector "in" \param ev - vector to store the eigenvalues \param evShape - expected shape of the eigenvalues \param evOffset - starting location for storing the eigenvalues in vector ev */ ESCRIPT_DLL_API inline void eigenvalues(const DataTypes::RealVectorType& in, const DataTypes::ShapeType& inShape, typename DataTypes::RealVectorType::size_type inOffset, DataTypes::RealVectorType& ev, const DataTypes::ShapeType& evShape, typename DataTypes::RealVectorType::size_type evOffset) { typename DataTypes::RealVectorType::ElementType in00,in10,in20,in01,in11,in21,in02,in12,in22; typename DataTypes::RealVectorType::ElementType ev0,ev1,ev2; int s=inShape[0]; if (s==1) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; eigenvalues1(in00,&ev0); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; } else if (s==2) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; eigenvalues2(in00,(in01+in10)/2.,in11,&ev0,&ev1); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; } else if (s==3) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in20=in[inOffset+DataTypes::getRelIndex(inShape,2,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; in21=in[inOffset+DataTypes::getRelIndex(inShape,2,1)]; in02=in[inOffset+DataTypes::getRelIndex(inShape,0,2)]; in12=in[inOffset+DataTypes::getRelIndex(inShape,1,2)]; in22=in[inOffset+DataTypes::getRelIndex(inShape,2,2)]; eigenvalues3(in00,(in01+in10)/2.,(in02+in20)/2.,in11,(in21+in12)/2.,in22, &ev0,&ev1,&ev2); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; ev[evOffset+DataTypes::getRelIndex(evShape,2)]=ev2; } } inline void eigenvalues(const DataTypes::CplxVectorType& in, const DataTypes::ShapeType& inShape, typename DataTypes::CplxVectorType::size_type inOffset, DataTypes::CplxVectorType& ev, const DataTypes::ShapeType& evShape, typename DataTypes::CplxVectorType::size_type evOffset) { typename DataTypes::CplxVectorType::ElementType in00,in10,in20,in01,in11,in21,in02,in12,in22; typename DataTypes::CplxVectorType::ElementType ev0,ev1,ev2; int s=inShape[0]; if (s==1) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; eigenvalues1(in00,&ev0); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; } else if (s==2) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; eigenvalues2(in00,(in01+in10)/2.,in11,&ev0,&ev1); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; } else if (s==3) { // this doesn't work yet // in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; // in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; // in20=in[inOffset+DataTypes::getRelIndex(inShape,2,0)]; // in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; // in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; // in21=in[inOffset+DataTypes::getRelIndex(inShape,2,1)]; // 
in02=in[inOffset+DataTypes::getRelIndex(inShape,0,2)]; // in12=in[inOffset+DataTypes::getRelIndex(inShape,1,2)]; // in22=in[inOffset+DataTypes::getRelIndex(inShape,2,2)]; // eigenvalues3(in00,(in01+in10)/2.,(in02+in20)/2.,in11,(in21+in12)/2.,in22, // &ev0,&ev1,&ev2); // ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; // ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; // ev[evOffset+DataTypes::getRelIndex(evShape,2)]=ev2; } } /** \brief solves a local eigenvalue problem \param in - vector containing the input matrix \param inShape - shape of the input matrix \param inOffset - the beginning of the input matrix within the vector "in" \param ev - vector to store the eigenvalues \param evShape - expected shape of the eigenvalues \param evOffset - starting location for storing the eigenvalues in ev \param V - vector to store the eigenvectors \param VShape - expected shape of the eigenvectors \param VOffset - starting location for storing the eigenvectors in V \param tol - Input - eigenvalues with relative difference tol are treated as equal */ ESCRIPT_DLL_API inline void eigenvalues_and_eigenvectors(const DataTypes::RealVectorType& in, const DataTypes::ShapeType& inShape, DataTypes::RealVectorType::size_type inOffset, DataTypes::RealVectorType& ev, const DataTypes::ShapeType& evShape, DataTypes::RealVectorType::size_type evOffset, DataTypes::RealVectorType& V, const DataTypes::ShapeType& VShape, DataTypes::RealVectorType::size_type VOffset, const double tol=1.e-13) { double in00,in10,in20,in01,in11,in21,in02,in12,in22; double V00,V10,V20,V01,V11,V21,V02,V12,V22; double ev0,ev1,ev2; int s=inShape[0]; if (s==1) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; eigenvalues_and_eigenvectors1(in00,&ev0,&V00,tol); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; V[inOffset+DataTypes::getRelIndex(VShape,0,0)]=V00; } else if (s==2) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; eigenvalues_and_eigenvectors2(in00,(in01+in10)/2.,in11, &ev0,&ev1,&V00,&V10,&V01,&V11,tol); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; V[inOffset+DataTypes::getRelIndex(VShape,0,0)]=V00; V[inOffset+DataTypes::getRelIndex(VShape,1,0)]=V10; V[inOffset+DataTypes::getRelIndex(VShape,0,1)]=V01; V[inOffset+DataTypes::getRelIndex(VShape,1,1)]=V11; } else if (s==3) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in20=in[inOffset+DataTypes::getRelIndex(inShape,2,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; in21=in[inOffset+DataTypes::getRelIndex(inShape,2,1)]; in02=in[inOffset+DataTypes::getRelIndex(inShape,0,2)]; in12=in[inOffset+DataTypes::getRelIndex(inShape,1,2)]; in22=in[inOffset+DataTypes::getRelIndex(inShape,2,2)]; eigenvalues_and_eigenvectors3(in00,(in01+in10)/2.,(in02+in20)/2.,in11,(in21+in12)/2.,in22, &ev0,&ev1,&ev2, &V00,&V10,&V20,&V01,&V11,&V21,&V02,&V12,&V22,tol); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; ev[evOffset+DataTypes::getRelIndex(evShape,2)]=ev2; V[inOffset+DataTypes::getRelIndex(VShape,0,0)]=V00; V[inOffset+DataTypes::getRelIndex(VShape,1,0)]=V10; V[inOffset+DataTypes::getRelIndex(VShape,2,0)]=V20; V[inOffset+DataTypes::getRelIndex(VShape,0,1)]=V01; 
V[inOffset+DataTypes::getRelIndex(VShape,1,1)]=V11; V[inOffset+DataTypes::getRelIndex(VShape,2,1)]=V21; V[inOffset+DataTypes::getRelIndex(VShape,0,2)]=V02; V[inOffset+DataTypes::getRelIndex(VShape,1,2)]=V12; V[inOffset+DataTypes::getRelIndex(VShape,2,2)]=V22; } } /** Inline function definitions. */ template <class VEC> inline bool checkOffset(const VEC& data, const DataTypes::ShapeType& shape, typename VEC::size_type offset) { return (data.size() >= (offset+DataTypes::noValues(shape))); } /** * This assumes that all data involved have the same points per sample and same shape */ template <class ResVEC, class LVEC, class RSCALAR> void binaryOpVectorRightScalar(ResVEC& res, // where result is to be stored typename ResVEC::size_type resOffset, // offset in the result vector to start storing results const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result const typename ResVEC::size_type sampleSize, // number of values in each sample const LVEC& left, // LHS of calculation typename LVEC::size_type leftOffset, // where to start reading LHS values const RSCALAR* right, // RHS of the calculation const bool rightreset, // true if RHS is providing a single sample of 1 value only escript::ES_optype operation, // operation to perform bool singleleftsample) // set to false for normal operation { size_t substep=(rightreset?0:1); switch (operation) { case ADD: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize); const RSCALAR* rpos=right+(rightreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]+*rpos; } } } break; case POW: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize); const RSCALAR* rpos=right+(rightreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=pow(left[leftbase+j],*rpos); } } } break; case SUB: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize); const RSCALAR* rpos=right+(rightreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]-*rpos; } } } break; case MUL: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize); const RSCALAR* rpos=right+(rightreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j] * *rpos; } } } break; case DIV: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize); const RSCALAR* rpos=right+(rightreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]/ *rpos; } } } break; default: throw DataException("Unsupported binary operation"); } } template<> void binaryOpVectorRightScalar(DataTypes::RealVectorType& res, // where result is to be stored typename DataTypes::RealVectorType::size_type resOffset, // offset in the result vector to start storing results const typename DataTypes::RealVectorType::size_type samplesToProcess, // number of 
samples to be updated in the result const typename DataTypes::RealVectorType::size_type sampleSize, // number of values in each sample const DataTypes::RealVectorType& left, // LHS of calculation typename DataTypes::RealVectorType::size_type leftOffset, // where to start reading LHS values const DataTypes::real_t* right, // RHS of the calculation const bool rightreset, // true if RHS is providing a single sample of 1 value only escript::ES_optype operation, // operation to perform bool singleleftsample); /** * This assumes that all data involved have the same points per sample and same shape */ template <class ResVEC, class LSCALAR, class RVEC> void binaryOpVectorLeftScalar(ResVEC& res, // where result is to be stored typename ResVEC::size_type resOffset, // offset in the result vector to start storing results const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result const typename ResVEC::size_type sampleSize, // number of values in each sample const LSCALAR* left, // LHS of calculation const bool leftreset, // true if LHS is providing a single sample of 1 value only const RVEC& right, // RHS of the calculation typename RVEC::size_type rightOffset, // where to start reading RHS values escript::ES_optype operation, // operation to perform bool singlerightsample) // right consists of a single sample { size_t substep=(leftreset?0:1); switch (operation) { case ADD: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos+right[rightbase+j]; } } } break; case POW: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=pow(*lpos,right[rightbase+j]); } } } break; case SUB: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos-right[rightbase+j]; } } } break; case MUL: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos*right[rightbase+j]; } } } break; case DIV: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos/right[rightbase+j]; } } } break; default: throw DataException("Unsupported binary operation"); } } template <> void binaryOpVectorLeftScalar(DataTypes::RealVectorType& res, // where result is to be stored typename DataTypes::RealVectorType::size_type resOffset, // offset in the result vector to start storing results const typename DataTypes::RealVectorType::size_type 
samplesToProcess, // number of samples to be updated in the result const typename DataTypes::RealVectorType::size_type sampleSize, // number of values in each sample const DataTypes::real_t* left, // LHS of calculation const bool leftreset, // true if LHS is providing a single sample of 1 value only const DataTypes::RealVectorType& right, // RHS of the calculation typename DataTypes::RealVectorType::size_type rightOffset, // where to start reading RHS values escript::ES_optype operation, // operation to perform bool singlerightsample); // right consists of a single sample /** * This assumes that all data involved have the same points per sample and same shape */ template <class ResVEC, class LVEC, class RVEC> void binaryOpVector(ResVEC& res, // where result is to be stored typename ResVEC::size_type resOffset, // offset in the result vector to start storing results const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result const typename ResVEC::size_type sampleSize, // number of values in each sample const LVEC& left, // LHS of calculation typename LVEC::size_type leftOffset, // where to start reading LHS values const bool leftreset, // Is LHS only supplying a single sample instead of a bunch of them const RVEC& right, // RHS of the calculation typename RVEC::size_type rightOffset, // where to start reading RHS values const bool rightreset, // Is RHS only supplying a single sample instead of a bunch of them escript::ES_optype operation) // operation to perform { switch (operation) { case ADD: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]+right[rightbase+j]; } } } break; case POW: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=pow(left[leftbase+j],right[rightbase+j]); } } } break; case SUB: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]-right[rightbase+j]; } } } break; case MUL: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]*right[rightbase+j]; } } } break; case DIV: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]/right[rightbase+j]; } } } break; default: throw DataException("Unsupported binary operation"); } } template <> 
void binaryOpVector(DataTypes::RealVectorType& res,                                    // where result is to be stored
               typename DataTypes::RealVectorType::size_type resOffset,                // offset in the result vector to start storing results
               const typename DataTypes::RealVectorType::size_type samplesToProcess,   // number of samples to be updated in the result
               const typename DataTypes::RealVectorType::size_type sampleSize,         // number of values in each sample
               const DataTypes::RealVectorType& left,                                  // LHS of calculation
               typename DataTypes::RealVectorType::size_type leftOffset,               // where to start reading LHS values
               const bool leftreset,                                                   // Is LHS only supplying a single sample instead of a bunch of them
               const DataTypes::RealVectorType& right,                                 // RHS of the calculation
               typename DataTypes::RealVectorType::size_type rightOffset,              // where to start reading RHS values
               const bool rightreset,                                                  // Is RHS only supplying a single sample instead of a bunch of them
               escript::ES_optype operation);                                          // operation to perform

#define OPVECLAZYBODY(X) \
    for (size_t j=0;j<onumsteps;++j)\
    {\
        for (size_t i=0;i<numsteps;++i,res+=resultStep) \
        { \
            for (size_t s=0; s<chunksize; ++s)\
            {\
                res[s] = X;\
            }\
            /* tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X);*/ \
            lroffset+=leftstep; \
            rroffset+=rightstep; \
        }\
        lroffset+=oleftstep;\
        rroffset+=orightstep;\
    }

/**
 * This assumes that all data involved have the same points per sample and same shape.
 * This version is to be called from within DataLazy.
 * It does not have openmp around loops because it will be evaluating individual samples
 * (which will be done within an enclosing openmp region).
*/
template <class ResELT, class LELT, class RELT>
void binaryOpVectorLazyArithmeticHelper(ResELT* res,
               const LELT* left,
               const RELT* right,
               const size_t chunksize,
               const size_t onumsteps,
               const size_t numsteps,
               const size_t resultStep,
               const size_t leftstep,
               const size_t rightstep,
               const size_t oleftstep,
               const size_t orightstep,
               size_t lroffset,
               size_t rroffset,
               escript::ES_optype operation)            // operation to perform
{
    switch (operation)
    {
      case ADD: OPVECLAZYBODY((left[lroffset+s]+right[rroffset+s])); break;
      case POW: OPVECLAZYBODY(pow(left[lroffset+s],right[rroffset+s])); break;
      case SUB: OPVECLAZYBODY(left[lroffset+s]-right[rroffset+s]); break;
      case MUL: OPVECLAZYBODY(left[lroffset+s]*right[rroffset+s]); break;
      case DIV: OPVECLAZYBODY(left[lroffset+s]/right[rroffset+s]); break;
      default:
        ESYS_ASSERT(false, "Invalid operation. This should never happen!");    // can't throw here because this will be called inside a parallel section
    }
}

/**
 * This assumes that all data involved have the same points per sample and same shape.
 * This version is to be called from within DataLazy.
 * It does not have openmp around loops because it will be evaluating individual samples
 * (which will be done within an enclosing openmp region).
*/ template <class ResELT, class LELT, class RELT> void binaryOpVectorLazyRelationalHelper(ResELT* res, const LELT* left, const RELT* right, const size_t chunksize, const size_t onumsteps, const size_t numsteps, const size_t resultStep, const size_t leftstep, const size_t rightstep, const size_t oleftstep, const size_t orightstep, size_t lroffset, size_t rroffset, escript::ES_optype operation) // operation to perform { switch (operation) { case LESS: OPVECLAZYBODY(left[lroffset+s]<right[rroffset+s]) break; case GREATER: OPVECLAZYBODY(left[lroffset+s]>right[rroffset+s]) break; case GREATER_EQUAL: OPVECLAZYBODY(left[lroffset+s]>=right[rroffset+s]) break; case LESS_EQUAL: OPVECLAZYBODY(left[lroffset+s]<=right[rroffset+s]) break; default: ESYS_ASSERT(false, "Invalid operation. This should never happen!"); // I can't throw here because this will be called inside a parallel section } } /** * This assumes that all data involved have the same points per sample and same shape */ /* trying to make a single version for all Tagged+Expanded interactions */ template <class ResVEC, class LVEC, class RVEC> void binaryOpVectorTagged(ResVEC& res, // where result is to be stored const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result const typename ResVEC::size_type DPPSample, // number of datapoints per sample const typename ResVEC::size_type DPSize, // datapoint size const LVEC& left, // LHS of calculation bool leftscalar, const RVEC& right, // RHS of the calculation bool rightscalar, bool lefttagged, // true if left object is the tagged one const DataTagged& tagsource, // where to get tag offsets from escript::ES_optype operation) // operation to perform { typename ResVEC::size_type lstep=leftscalar?1:DPSize; typename ResVEC::size_type rstep=rightscalar?1:DPSize; typename ResVEC::size_type limit=samplesToProcess*DPPSample; switch (operation) { case ADD: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]+right[rightbase+j*(!rightscalar)]; } } } break; case POW: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=pow(left[leftbase+j*(!leftscalar)],right[rightbase+j*(!rightscalar)]); } } } break; case SUB: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]-right[rightbase+j*(!rightscalar)]; } } } break; case MUL: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type 
rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]*right[rightbase+j*(!rightscalar)]; } } } break; case DIV: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]/right[rightbase+j*(!rightscalar)]; } } } break; default: throw DataException("Unsupported binary operation"); } } template<> void binaryOpVectorTagged(DataTypes::RealVectorType& res, // where result is to be stored const typename DataTypes::RealVectorType::size_type samplesToProcess, // number of samples to be updated in the result const typename DataTypes::RealVectorType::size_type DPPSample, // number of datapoints per sample const typename DataTypes::RealVectorType::size_type DPSize, // datapoint size const DataTypes::RealVectorType& left, // LHS of calculation const bool leftscalar, const DataTypes::RealVectorType& right, // RHS of the calculation const bool rightscalar, const bool lefttagged, // true if left object is the tagged one const DataTagged& tagsource, // where to get tag offsets from escript::ES_optype operation); /** \brief Perform the given data point reduction operation on the data point specified by the given offset into the view. Reduces all elements of the data point using the given operation, returning the result as a scalar. Operation must be a pointer to a function. Called by escript::algorithm. \param left - vector containing the datapoint \param shape - shape of datapoints in the vector \param offset - beginning of datapoint in the vector \param operation - Input - Operation to apply. Must be a pointer to a function. 
 \param initial_value
*/
template <class BinaryFunction>
inline DataTypes::real_t
reductionOpVector(const DataTypes::RealVectorType& left,
                  const DataTypes::ShapeType& leftShape,
                  DataTypes::RealVectorType::size_type offset,
                  BinaryFunction operation,
                  DataTypes::real_t initial_value)
{
    ESYS_ASSERT((left.size()>0)&&checkOffset(left,leftShape,offset),
                "Couldn't perform reductionOp due to insufficient storage.");
    DataTypes::real_t current_value=initial_value;
    for (DataTypes::RealVectorType::size_type i=0;i<DataTypes::noValues(leftShape);i++)
    {
        current_value=operation(current_value,left[offset+i]);
    }
    return current_value;
}

template <class BinaryFunction>
inline DataTypes::real_t
reductionOpVector(const DataTypes::CplxVectorType& left,
                  const DataTypes::ShapeType& leftShape,
                  DataTypes::CplxVectorType::size_type offset,
                  BinaryFunction operation,
                  DataTypes::real_t initial_value)
{
    ESYS_ASSERT((left.size()>0)&&checkOffset(left,leftShape,offset),
                "Couldn't perform reductionOp due to insufficient storage.");
    DataTypes::real_t current_value=initial_value;
    for (DataTypes::CplxVectorType::size_type i=0;i<DataTypes::noValues(leftShape);i++)
    {
        current_value=operation(current_value,left[offset+i]);
    }
    return current_value;
}

/**
 \brief computes the inverses of square (up to 3x3) matrices
 \param in - vector containing the input matrices
 \param inShape - shape of the input matrices
 \param inOffset - the beginning of the input matrices within the vector "in"
 \param out - vector to store the inverses
 \param outShape - expected shape of the inverses
 \param outOffset - starting location for storing the inverses in out
 \param count - number of matrices to invert
 \param helper - associated working storage
 \exception DataException if input and output are not the correct shape or if any of the matrices are not invertible.
 \return 0 on success, on failure the return value should be passed to matrixInverseError(int err).
*/
int
matrix_inverse(const DataTypes::RealVectorType& in,
               const DataTypes::ShapeType& inShape,
               DataTypes::RealVectorType::size_type inOffset,
               DataTypes::RealVectorType& out,
               const DataTypes::ShapeType& outShape,
               DataTypes::RealVectorType::size_type outOffset,
               int count,
               LapackInverseHelper& helper);

/**
 \brief throws an appropriate exception based on failure of matrix_inverse.
 \param err - error code returned from matrix_inverse
 \warning do not call in a parallel region since it throws.
*/
void matrixInverseError(int err);

/**
 \brief returns true if the vector contains NaN
*/
inline bool vectorHasNaN(const DataTypes::RealVectorType& in, DataTypes::RealVectorType::size_type inOffset, size_t count)
{
    for (size_t z=inOffset;z<inOffset+count;++z)
    {
        if (nancheck(in[z]))
        {
            return true;
        }
    }
    return false;
}

inline bool vectorHasNaN(const DataTypes::CplxVectorType& in, DataTypes::CplxVectorType::size_type inOffset, size_t count)
{
    for (size_t z=inOffset;z<inOffset+count;++z)
    {
        if (nancheck(in[z]))
        {
            return true;
        }
    }
    return false;
}

} // end namespace escript

#endif // __ESCRIPT_DATAMATHS_H__
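The binary-op templates above all share one access pattern: an OpenMP-parallel loop over samples, an inner loop over the values within a sample, and a "reset" flag that pins one operand to a single broadcast sample. The following is a minimal, self-contained C sketch of that pattern; the function name and parameters are illustrative only, not part of the escript API.

#include <stddef.h>

/* Sample-wise addition with optional RHS broadcast (compile with -fopenmp;
 * the unsigned loop index needs OpenMP 3.0 or later). When rightreset is
 * nonzero every sample reads the same RHS block, mirroring the
 * rightreset/singlerightsample flags used by the templates above. */
void add_samples(double *res, const double *left, const double *right,
                 size_t samplesToProcess, size_t sampleSize, int rightreset)
{
    #pragma omp parallel for
    for (size_t i = 0; i < samplesToProcess; ++i) {
        size_t rightbase = rightreset ? 0 : i * sampleSize;   /* broadcast or stride */
        for (size_t j = 0; j < sampleSize; ++j)
            res[i * sampleSize + j] = left[i * sampleSize + j] + right[rightbase + j];
    }
}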
common.c
/**************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 3.1 * * * * produced by * * * * Mark Bull, Fiona Reid and Nix Mc Donnell * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk * * * * * * This version copyright (c) The University of Edinburgh, 2015. * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <omp.h> #include "common.h" #define CONF95 1.96 int nthreads = -1; // Number of OpenMP threads int delaylength = -1; // The number of iterations to delay for int outerreps = -1; // Outer repetitions double delaytime = -1.0; // Length of time to delay for in microseconds double targettesttime = 0.0; // The length of time in microseconds that the test // should run for. unsigned long innerreps; // Inner repetitions double *times; // Array of doubles storing the benchmark times in microseconds double referencetime; // The average reference time in microseconds to perform // outerreps runs double referencesd; // The standard deviation in the reference time in // microseconds for outerreps runs. double testtime; // The average test time in microseconds for // outerreps runs double testsd; // The standard deviation in the test time in // microseconds for outerreps runs. 
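/*
 * Note on the measurement model (added summary, not part of the original
 * suite): each construct is timed over outerreps outer runs, each consisting
 * of innerreps inner iterations, and the per-iteration times are stored in
 * times[]. The reported overhead of a construct is
 *
 *     overhead = testtime - referencetime
 *
 * and the reported uncertainty is CONF95 * s.d., i.e. 1.96 standard
 * deviations, a 95% interval assuming approximately normally distributed
 * run times; the overhead interval combines both standard deviations.
 */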
void usage(char *argv[]) {
    printf("Usage: %s.x \n"
           "\t--outer-repetitions <outer-repetitions> (default %d)\n"
           "\t--test-time <target-test-time> (default %0.2f microseconds)\n"
           "\t--delay-time <delay-time> (default %0.4f microseconds)\n"
           "\t--delay-length <delay-length> "
           "(default auto-generated based on processor speed)\n",
           argv[0],
           DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME);
}

void parse_args(int argc, char *argv[]) {
    // Parse the parameters
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--delay-time") == 0) {
            delaytime = atof(argv[++arg]);
            if (delaytime == 0.0) {
                printf("Invalid float:--delay-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
            outerreps = atoi(argv[++arg]);
            if (outerreps == 0) {
                printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--test-time") == 0) {
            targettesttime = atof(argv[++arg]);
            if (targettesttime == 0.0) {
                printf("Invalid float:--test-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}

int getdelaylengthfromtime(double delaytime) {
    int i, reps;
    double lapsedtime, starttime; // seconds

    reps = 1000;
    lapsedtime = 0.0;

    delaytime = delaytime/1.0E6; // convert from microseconds to seconds
    // Note: delaytime is local to this function and thus the conversion
    // does not propagate to the main code.

    // Here we want to use the delaytime in microseconds to find the
    // delaylength in iterations. We start with delaylength=0 and
    // increase until we get a large enough delaytime, return delaylength
    // in iterations.

    delaylength = 0;
    delay(delaylength);

    while (lapsedtime < delaytime) {
        delaylength = delaylength * 1.1 + 1;
        starttime = getclock();
        for (i = 0; i < reps; i++) {
            delay(delaylength);
        }
        lapsedtime = (getclock() - starttime) / (double) reps;
    }
    return delaylength;
}

unsigned long getinnerreps(void (*test)(void)) {
    innerreps = 10L; // some initial value
    double time = 0.0;

    while (time < targettesttime) {
        double start = getclock();
        test();
        time = (getclock() - start) * 1.0e6;
        innerreps *= 2;

        // Test to stop code if compiler is optimising reference time expressions away
        if (innerreps > (targettesttime * 1.0e15)) {
            printf("Compiler has optimised reference loop away, STOP! \n");
            printf("Try recompiling with lower optimisation level \n");
            exit(1);
        }
    }
    return innerreps;
}

void printheader(char *name) {
    printf("\n");
    printf("--------------------------------------------------------\n");
    printf("Computing %s time using %lu reps\n", name, innerreps);
}

void stats(double *mtp, double *sdp) {

    double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;

    int i, nr;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;

    // times[0] holds the warm-up run and is deliberately excluded
    for (i = 1; i <= outerreps; i++) {
        mintime = (mintime < times[i]) ? mintime : times[i];
        maxtime = (maxtime > times[i]) ? maxtime : times[i];
        totaltime += times[i];
    }

    meantime = totaltime / outerreps;
    sumsq = 0;

    for (i = 1; i <= outerreps; i++) {
        sumsq += (times[i] - meantime) * (times[i] - meantime);
    }
    sd = sqrt(sumsq / (outerreps - 1));

    cutoff = 3.0 * sd;

    nr = 0;

    for (i = 1; i <= outerreps; i++) {
        if (fabs(times[i] - meantime) > cutoff)
            nr++;
    }

    printf("\n");
    printf("Sample_size       Average     Min         Max          S.D.          Outliers\n");
    printf(" %d                %f   %f   %f    %f      %d\n",
           outerreps, meantime, mintime, maxtime, sd, nr);
    printf("\n");

    *mtp = meantime;
    *sdp = sd;
}

void printfooter(char *name, double testtime, double testsd,
                 double referencetime, double refsd) {
    printf("%s time     = %f microseconds +/- %f\n",
           name, testtime, CONF95*testsd);
    printf("%s overhead = %f microseconds +/- %f\n",
           name, testtime-referencetime, CONF95*(testsd+refsd)); // use the refsd argument, not the global
}

void printreferencefooter(char *name, double referencetime, double referencesd) {
    printf("%s time = %f microseconds +/- %f\n",
           name, referencetime, CONF95 * referencesd);
}

void init(int argc, char **argv)
{
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    parse_args(argc, argv);

    if (outerreps == -1) {
        outerreps = DEFAULT_OUTER_REPS;
    }
    if (targettesttime == 0.0) {
        targettesttime = DEFAULT_TEST_TARGET_TIME;
    }
    if (delaytime == -1.0) {
        delaytime = DEFAULT_DELAY_TIME;
    }
    delaylength = getdelaylengthfromtime(delaytime); // Always need to compute delaylength in iterations

    times = malloc((outerreps+1) * sizeof(double));

    printf("Running OpenMP benchmark version 3.1\n"
           "\t%d thread(s)\n"
           "\t%d outer repetitions\n"
           "\t%0.2f test time (microseconds)\n"
           "\t%d delay length (iterations) \n"
           "\t%f delay time (microseconds)\n",
           nthreads,
           outerreps, targettesttime,
           delaylength, delaytime);
}

void finalise(void) {
    free(times);
}

void initreference(char *name) {
    printheader(name);
}

/* Calculate the reference time. */
void reference(char *name, void (*refer)(void)) {
    int k;
    double start;

    // Calculate the required number of innerreps
    innerreps = getinnerreps(refer);

    initreference(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        refer();
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisereference(name);
}

void finalisereference(char *name) {
    stats(&referencetime, &referencesd);
    printreferencefooter(name, referencetime, referencesd);
}

void intitest(char *name) { /* (sic) spelling kept to match the suite's header declarations */
    printheader(name);
}

void finalisetest(char *name) {
    stats(&testtime, &testsd);
    printfooter(name, testtime, testsd, referencetime, referencesd);
}

/* Function to run a microbenchmark test */
void benchmark(char *name, void (*test)(void))
{
    int k;
    double start;

    // Calculate the required number of innerreps
    innerreps = getinnerreps(test);

    intitest(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        test();
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisetest(name);
}

// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be affected.
//#pragma _CRI noopt
void __attribute__((optnone)) delay(int delaylength) {

    int i;
    float a = 0.;

    for (i = 0; i < delaylength; i++)
        a += i;
    if (a < 0)
        printf("%f \n", a);

}

void __attribute__((optnone)) array_delay(int delaylength, double a[1]) {

    int i;
    a[0] = 1.0;
    for (i = 0; i < delaylength; i++)
        a[0] += i;
    if (a[0] < 0)
        printf("%f \n", a[0]);

}

// Re-enable optimisation for remainder of source.
//#pragma _CRI opt

double getclock() {
    // Returns a value in seconds of the time elapsed from some arbitrary,
    // but consistent point.
    return omp_get_wtime();
}

int returnfalse() {
    return 0;
}
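For context, a driver built on this harness looks like the sketch below, modelled on the suite's own benchmark programs (e.g. syncbench). It is an illustration, not part of common.c, and assumes common.h declares the functions and globals defined above (init, reference, benchmark, finalise, delay, innerreps, delaylength).

#include <stdlib.h>
#include <omp.h>
#include "common.h"

/* serial reference: the same work with no OpenMP construct around it */
void refer(void) {
    for (unsigned long j = 0; j < innerreps; j++)
        delay(delaylength);
}

/* timed construct: a PARALLEL region around each delay */
void testparallel(void) {
    for (unsigned long j = 0; j < innerreps; j++) {
#pragma omp parallel
        delay(delaylength);
    }
}

int main(int argc, char **argv) {
    init(argc, argv);
    reference("reference time", &refer);
    benchmark("PARALLEL", &testparallel);  /* prints time and overhead vs. the reference */
    finalise();
    return EXIT_SUCCESS;
}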
kr1openmp.c
//
//  kr1openmp.c
//  AVS
//
//  Created by Alexander Degtyarev on 10/31/16.
//  Copyright © 2016 Alexander Degtyarev. All rights reserved.
//
//  gcc-6 -fopenmp -o kr1 kr1openmp.c

#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <omp.h>
#include <stdbool.h>
#include <unistd.h>

int main(int argc, const char * argv[]) {

    bool debug_info = argc > 1;
    const float TOP_CELL_VAL = 10.0;
    unsigned int rows = 0;
    unsigned int columns = 0;
    char* readbuf = malloc(1024*sizeof(char));

    //synchronized data input
    do{
        printf("Set row count\n");
        if(scanf("%4u",&rows) > 0 && rows > 0 && rows < 10000){
            break;
        }else{
            fgets(readbuf,1024,stdin);
            printf("Set val in range [1,9999]\n");
        }
    }while(true);

    do{
        printf("Set column count\n");
        if(scanf("%4u",&columns) > 0 && columns > 0 && columns < 10000){
            break;
        }else{
            fgets(readbuf,1024,stdin);
            printf("Set val in range [1,9999]\n");
        }
    }while(true);

    //allocating resources
    //note: the vector is indexed by column and the result by row,
    //so the vector needs `columns` elements and the result `rows`
    float *vector = malloc (columns * sizeof(float));
    float *result = malloc (rows * sizeof(float));
    float **matrix;
    matrix = (float **) malloc(rows*sizeof(float *));
    for(int i = 0; i < rows; i++){
        matrix[i]=(float *) malloc(columns * sizeof(float));
    }

    printf("\n -RANDOMIZER-\n");
    //parallel randomize matrix; rand() is not thread-safe, so each thread
    //keeps its own seed and draws values with rand_r()
    #pragma omp parallel shared(rows,columns,matrix,vector)
    {
        unsigned int seed = (unsigned int)time(NULL) ^ (unsigned int)omp_get_thread_num();
        #pragma omp for
        for(int p_idx = 0; p_idx < rows*columns; ++p_idx)
        {
            int p_i = p_idx/columns;
            int p_j = p_idx%columns;
            if(debug_info) printf("before pidx:i:j %d:%d:%d thread: %d:%d\n",p_idx,p_i,p_j,omp_get_thread_num(), omp_get_num_threads());
            if(p_i==0){
                vector[p_j] = ((float)rand_r(&seed)/(float)(RAND_MAX)) * TOP_CELL_VAL;
            }
            matrix[p_i][p_j] = ((float)rand_r(&seed)/(float)(RAND_MAX)) * TOP_CELL_VAL;
            if(debug_info) printf("after pidx:i:j %d:%d:%d thread: %d:%d\n",p_idx,p_i,p_j,omp_get_thread_num(), omp_get_num_threads());
        }
    }

    //synchronized print
    printf("\n -MATRIX-\n");
    for(int i = 0; i < rows; ++i)
    {
        for (int j = 0; j < columns; ++j) {
            printf(" %3.2f",matrix[i][j]);
        }
        printf("\n");
    }
    printf("\n -VECTOR-\n");
    for (int j = 0; j < columns; ++j) {
        printf(" %3.2f\n",vector[j]);
    }
    printf("\n -----------------\n");

    //parallel multiplication; each row i is handled by exactly one thread,
    //so initializing result[i] inside the loop is race-free
    #pragma omp parallel for shared(rows,columns,matrix,vector,result)
    for(int i = 0; i < rows; ++i)
    {
        result[i] = 0.0f;
        for (int j = 0; j < columns; ++j) {
            result[i]+=matrix[i][j]*vector[j];
            if(debug_info) printf("Thread: %d:%d\n",omp_get_thread_num(), omp_get_num_threads());
        }
    }

    //synchronized output
    printf("\n -RESULT-\n");
    for(int i = 0; i < rows; ++i)
    {
        printf(" %2.1f \n",result[i]);
    }

    //free each row before the row-pointer array itself
    for(int i = 0; i < rows; ++i){
        free(matrix[i]);
    }
    free(matrix);
    free(vector);
    free(result);
    free(readbuf);
    return 0;
}
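One possible refinement of the program above (a sketch under assumptions, not part of the original file): storing the matrix as a single contiguous rows*columns block instead of an array of row pointers improves cache locality in the multiply loop and reduces cleanup to a single free(). The helper names below are invented for illustration.

#include <stdlib.h>

/* one contiguous row-major block; element (i, j) lives at index i*columns + j */
float *alloc_matrix_flat(unsigned int rows, unsigned int columns) {
    return malloc((size_t)rows * columns * sizeof(float));
}

void matvec_flat(const float *matrix, const float *vector, float *result,
                 unsigned int rows, unsigned int columns) {
    #pragma omp parallel for
    for (int i = 0; i < (int)rows; ++i) {
        float acc = 0.0f;                       /* per-row accumulator, private to the thread */
        for (unsigned int j = 0; j < columns; ++j)
            acc += matrix[(size_t)i * columns + j] * vector[j];
        result[i] = acc;
    }
}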
subopt.c
/* * suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker * * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <ctype.h> #include <string.h> #include <math.h> #include "ViennaRNA/fold.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/datastructures/lists.h" #include "ViennaRNA/eval.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/cofold.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/alphabet.h" #include "ViennaRNA/subopt.h" /* hack */ #include "ViennaRNA/color_output.inc" #ifdef _OPENMP #include <omp.h> #endif #define true 1 #define false 0 #ifndef ON_SAME_STRAND #define ON_SAME_STRAND(I, J, C) (((I) >= (C)) || ((J) < (C))) #endif /** * @brief Sequence interval stack element used in subopt.c */ typedef struct INTERVAL { int i; int j; int array_flag; } INTERVAL; typedef struct { char *structure; LIST *Intervals; int partial_energy; int is_duplex; /* int best_energy; */ /* best attainable energy */ } STATE; typedef struct { LIST *Intervals; LIST *Stack; int nopush; } subopt_env; struct old_subopt_dat { unsigned long max_sol; unsigned long n_sol; SOLUTION *SolutionList; FILE *fp; int cp; }; /* ################################# # GLOBAL VARIABLES # ################################# */ PUBLIC int subopt_sorted = 0; /* output sorted by energy */ PUBLIC int density_of_states[MAXDOS + 1]; PUBLIC double print_energy = 9999; /* printing threshold for use with logML */ /* ################################# # PRIVATE VARIABLES # ################################# */ /* some backward compatibility stuff */ PRIVATE int backward_compat = 0; PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; #ifdef _OPENMP #pragma omp threadprivate(backward_compat_compound, backward_compat) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PRIVATE SOLUTION * wrap_subopt(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp); #endif PRIVATE void make_pair(int i, int j, STATE *state); /* mark a gquadruplex in the resulting dot-bracket structure */ PRIVATE void make_gquad(int i, int L, int l[3], STATE *state); PRIVATE INTERVAL * make_interval(int i, int j, int ml); PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length); PRIVATE STATE * copy_state(STATE *state); PRIVATE void print_state(STATE *state); PRIVATE void UNUSED print_stack(LIST *list); PRIVATE LIST * make_list(void); PRIVATE void push(LIST *list, void *data); PRIVATE void *pop(LIST *list); PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state); PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env); PRIVATE void free_interval_node(INTERVAL *node); PRIVATE void free_state_node(STATE *node); PRIVATE void push_back(LIST *Stack, STATE *state); PRIVATE char * get_structure(STATE *state); PRIVATE int compare(const void *solution1, const void *solution2); PRIVATE int compare_en(const void *solution1, const void *solution2); PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp); PRIVATE void 
repeat(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void repeat_gquad(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void old_subopt_print(const char *structure, float energy, void *data); PRIVATE void old_subopt_store(const char *structure, float energy, void *data); PRIVATE void old_subopt_store_compressed(const char *structure, float energy, void *data); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ /*---------------------------------------------------------------------------*/ /*List routines--------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE void make_pair(int i, int j, STATE *state) { state->structure[i - 1] = '('; state->structure[j - 1] = ')'; } PRIVATE void make_gquad(int i, int L, int l[3], STATE *state) { int x; for (x = 0; x < L; x++) { state->structure[i - 1 + x] = '+'; state->structure[i - 1 + x + L + l[0]] = '+'; state->structure[i - 1 + x + 2 * L + l[0] + l[1]] = '+'; state->structure[i - 1 + x + 3 * L + l[0] + l[1] + l[2]] = '+'; } } /*---------------------------------------------------------------------------*/ PRIVATE INTERVAL * make_interval(int i, int j, int array_flag) { INTERVAL *interval; interval = lst_newnode(sizeof(INTERVAL)); interval->i = i; interval->j = j; interval->array_flag = array_flag; return interval; } /*---------------------------------------------------------------------------*/ PRIVATE void free_interval_node(INTERVAL *node) { lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE void free_state_node(STATE *node) { free(node->structure); if (node->Intervals) lst_kill(node->Intervals, lst_freenode); lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length) { STATE *state; state = lst_newnode(sizeof(STATE)); if (Intervals) state->Intervals = Intervals; else state->Intervals = lst_init(); if (structure) { state->structure = structure; } else { int i; state->structure = (char *)vrna_alloc(length + 1); for (i = 0; i < length; i++) state->structure[i] = '.'; } state->partial_energy = partial_energy; return state; } /*---------------------------------------------------------------------------*/ PRIVATE STATE * copy_state(STATE *state) { STATE *new_state; void *after; INTERVAL *new_interval, *next; new_state = lst_newnode(sizeof(STATE)); new_state->Intervals = lst_init(); new_state->partial_energy = state->partial_energy; /* new_state->best_energy = state->best_energy; */ if (state->Intervals->count) { after = LST_HEAD(new_state->Intervals); for (next = lst_first(state->Intervals); next; next = lst_next(next)) { new_interval = lst_newnode(sizeof(INTERVAL)); *new_interval = *next; lst_insertafter(new_state->Intervals, new_interval, after); after = new_interval; } } new_state->structure = strdup(state->structure); if (!new_state->structure) vrna_message_error("out of memory"); return new_state; } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_state(STATE *state) { INTERVAL *next; if (state->Intervals->count) { printf("%d 
intervals:\n", state->Intervals->count); for (next = lst_first(state->Intervals); next; next = lst_next(next)) printf("[%d,%d],%d ", next->i, next->j, next->array_flag); printf("\n"); } printf("partial structure: %s\n", state->structure); printf("\n"); printf(" partial_energy: %d\n", state->partial_energy); /* printf(" best_energy: %d\n", state->best_energy); */ (void)fflush(stdout); } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_stack(LIST *list) { void *rec; printf("================\n"); printf("%d states\n", list->count); for (rec = lst_first(list); rec; rec = lst_next(rec)) { printf("state-----------\n"); print_state(rec); } printf("================\n"); } /*---------------------------------------------------------------------------*/ PRIVATE LIST * make_list(void) { return lst_init(); } /*---------------------------------------------------------------------------*/ PRIVATE void push(LIST *list, void *data) { lst_insertafter(list, data, LST_HEAD(list)); } /* PRIVATE void */ /* push_stack(STATE *state) { */ /* keep the stack sorted by energy */ /* STATE *after, *next; */ /* nopush = false; */ /* next = after = LST_HEAD(Stack); */ /* while ( next = lst_next(next)) { */ /* if ( next->best_energy >= state->best_energy ) break; */ /* after = next; */ /* } */ /* lst_insertafter(Stack, state, after); */ /* } */ /*---------------------------------------------------------------------------*/ PRIVATE void * pop(LIST *list) { void *data; data = lst_deletenext(list, LST_HEAD(list)); return data; } /*---------------------------------------------------------------------------*/ /*auxiliary routines---------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state) { /* evaluation of best possible energy attainable within remaining intervals */ register int sum; INTERVAL *next; vrna_md_t *md; vrna_mx_mfe_t *matrices; int *indx; md = &(vc->params->model_details); matrices = vc->matrices; indx = vc->jindx; sum = state->partial_energy; /* energy of already found elements */ for (next = lst_first(state->Intervals); next; next = lst_next(next)) { if (next->array_flag == 0) sum += (md->circ) ? 
matrices->Fc : matrices->f5[next->j]; else if (next->array_flag == 1) sum += matrices->fML[indx[next->j] + next->i]; else if (next->array_flag == 2) sum += matrices->c[indx[next->j] + next->i]; else if (next->array_flag == 3) sum += matrices->fM1[indx[next->j] + next->i]; else if (next->array_flag == 4) sum += matrices->fc[next->i]; else if (next->array_flag == 5) sum += matrices->fc[next->j]; else if (next->array_flag == 6) sum += matrices->ggg[indx[next->j] + next->i]; } return sum; } /*---------------------------------------------------------------------------*/ PRIVATE void push_back(LIST *Stack, STATE *state) { push(Stack, copy_state(state)); return; } /*---------------------------------------------------------------------------*/ PRIVATE char * get_structure(STATE *state) { char *structure; structure = strdup(state->structure); return structure; } /*---------------------------------------------------------------------------*/ PRIVATE int compare(const void *solution1, const void *solution2) { if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy) return 1; if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy) return -1; return strcmp(((SOLUTION *)solution1)->structure, ((SOLUTION *)solution2)->structure); } PRIVATE int compare_en(const void *solution1, const void *solution2) { if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy) return 1; if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy) return -1; return 0; } /*---------------------------------------------------------------------------*/ PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp) /* prints stuff */ { SOLUTION *sol; for (sol = SL; sol->structure != NULL; sol++) { char *e_string = vrna_strdup_printf(" %6.2f", sol->energy); char *ss = vrna_db_unpack(sol->structure); char *s = vrna_cut_point_insert(ss, cp); print_structure(fp, s, e_string); free(s); free(ss); free(e_string); } } PRIVATE STATE * derive_new_state(int i, int j, STATE *s, int e, int flag) { STATE *s_new = copy_state(s); INTERVAL *ival = make_interval(i, j, flag); push(s_new->Intervals, ival); s_new->partial_energy += e; return s_new; } PRIVATE void fork_state(int i, int j, STATE *s, int e, int flag, subopt_env *env) { STATE *s_new = derive_new_state(i, j, s, e, flag); push(env->Stack, s_new); env->nopush = false; } PRIVATE void fork_int_state(int i, int j, int p, int q, STATE *s, int e, subopt_env *env) { STATE *s_new = derive_new_state(p, q, s, e, 2); make_pair(i, j, s_new); make_pair(p, q, s_new); push(env->Stack, s_new); env->nopush = false; } PRIVATE void fork_state_pair(int i, int j, STATE *s, int e, subopt_env *env) { STATE *new_state; new_state = copy_state(s); make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states_pair(int i, int j, int k, STATE *s, int e, int flag1, int flag2, subopt_env *env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i + 1, k - 1, flag1); interval2 = make_interval(k, j - 1, flag2); if (k - i < j - k) { /* push larger interval first */ push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states(int i, int j, int p, int q, STATE *s, int e, int flag1, int flag2, subopt_env 
*env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i, j, flag1); interval2 = make_interval(p, q, flag2); if ((j - i) < (q - p)) { push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } /*---------------------------------------------------------------------------*/ /* start of subopt backtracking ---------------------------------------------*/ /*---------------------------------------------------------------------------*/ PUBLIC SOLUTION * vrna_subopt(vrna_fold_compound_t *vc, int delta, int sorted, FILE *fp) { struct old_subopt_dat data; vrna_subopt_callback *cb; data.SolutionList = NULL; data.max_sol = 128; data.n_sol = 0; data.fp = fp; data.cp = vc->cutpoint; if (vc) { /* SolutionList stores the suboptimal structures found */ data.SolutionList = (SOLUTION *)vrna_alloc(data.max_sol * sizeof(SOLUTION)); /* end initialize ------------------------------------------------------- */ if (fp) { float min_en; char *SeQ, *energies = NULL; if (vc->strands > 1) min_en = vrna_mfe_dimer(vc, NULL); else min_en = vrna_mfe(vc, NULL); SeQ = vrna_cut_point_insert(vc->sequence, vc->cutpoint); energies = vrna_strdup_printf(" %6.2f %6.2f", min_en, (float)delta / 100.); print_structure(fp, SeQ, energies); free(SeQ); free(energies); vrna_mx_mfe_free(vc); } cb = old_subopt_store; if (fp) cb = (sorted) ? old_subopt_store_compressed : old_subopt_print; /* call subopt() */ vrna_subopt_cb(vc, delta, cb, (void *)&data); if (sorted) { /* sort structures by energy */ if (data.n_sol > 0) { int (*compare_fun)(const void *a, const void *b); switch (sorted) { case VRNA_SORT_BY_ENERGY_ASC: compare_fun = compare_en; break; default: /* a.k.a. 
VRNA_SORT_BY_ENERGY_LEXICOGRAPHIC_ASC */ compare_fun = compare; break; } qsort(data.SolutionList, data.n_sol - 1, sizeof(SOLUTION), compare_fun); } if (fp) make_output(data.SolutionList, vc->cutpoint, fp); } if (fp) { /* we've printed everything -- free solutions */ SOLUTION *sol; for (sol = data.SolutionList; sol->structure != NULL; sol++) free(sol->structure); free(data.SolutionList); data.SolutionList = NULL; } } return data.SolutionList; } PUBLIC void vrna_subopt_cb(vrna_fold_compound_t *vc, int delta, vrna_subopt_callback *cb, void *data) { subopt_env *env; STATE *state; INTERVAL *interval; unsigned int *so, *ss, *se; int maxlevel, count, partial_energy, old_dangles, logML, dangle_model, length, circular, threshold; double structure_energy, min_en, eprint; char *struc, *structure; float correction; vrna_param_t *P; vrna_md_t *md; int minimal_energy; int Fc; int *f5; vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID); length = vc->length; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; P = vc->params; md = &(P->model_details); /* do mfe folding to get fill arrays and get ground state energy */ /* in case dangles is neither 0 or 2, set dangles=2 while folding */ circular = md->circ; logML = md->logML; old_dangles = dangle_model = md->dangles; if (md->uniq_ML != 1) /* failsafe mechanism to enforce valid fM1 array */ md->uniq_ML = 1; /* temporarily set dangles to 2 if necessary */ if ((md->dangles != 0) && (md->dangles != 2)) md->dangles = 2; struc = (char *)vrna_alloc(sizeof(char) * (length + 1)); if (circular) { min_en = vrna_mfe(vc, struc); Fc = vc->matrices->Fc; f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, struc); } else { min_en = vrna_mfe_dimer(vc, struc); f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, struc); } free(struc); eprint = print_energy + min_en; correction = (min_en < 0) ? -0.1 : 0.1; /* Initialize ------------------------------------------------------------ */ maxlevel = 0; count = 0; partial_energy = 0; /* Initialize the stack ------------------------------------------------- */ minimal_energy = (circular) ? Fc : f5[length]; threshold = minimal_energy + delta; if (threshold >= INF) { vrna_message_warning("Energy range too high, limiting to reasonable value"); threshold = INF - EMAX; } /* init env data structure */ env = (subopt_env *)vrna_alloc(sizeof(subopt_env)); env->Stack = NULL; env->nopush = true; env->Stack = make_list(); /* anchor */ env->Intervals = make_list(); /* initial state: */ interval = make_interval(1, length, 0); /* interval [1,length,0] */ push(env->Intervals, interval); env->nopush = false; state = make_state(env->Intervals, NULL, partial_energy, 0, length); /* state->best_energy = minimal_energy; */ push(env->Stack, state); env->nopush = false; /* end initialize ------------------------------------------------------- */ while (1) { /* forever, til nothing remains on stack */ maxlevel = (env->Stack->count > maxlevel ? env->Stack->count : maxlevel); if (LST_EMPTY(env->Stack)) { /* we are done! 
clean up and quit */ /* fprintf(stderr, "maxlevel: %d\n", maxlevel); */ lst_kill(env->Stack, free_state_node); cb(NULL, 0, data); /* NULL (last time to call callback function */ break; } /* pop the last element ---------------------------------------------- */ state = pop(env->Stack); /* current state to work with */ if (LST_EMPTY(state->Intervals)) { int e; /* state has no intervals left: we got a solution */ count++; structure = get_structure(state); structure_energy = state->partial_energy / 100.; #ifdef CHECK_ENERGY structure_energy = vrna_eval_structure(vc, structure); if (!logML) if ((double)(state->partial_energy / 100.) != structure_energy) { vrna_message_error("%s %6.2f %6.2f", structure, state->partial_energy / 100., structure_energy); exit(1); } #endif if (logML || (dangle_model == 1) || (dangle_model == 3)) /* recalc energy */ structure_energy = vrna_eval_structure(vc, structure); e = (int)((structure_energy - min_en) * 10. - correction); /* avoid rounding errors */ if (e > MAXDOS) e = MAXDOS; density_of_states[e]++; if (structure_energy <= eprint) { char *outstruct = vrna_cut_point_insert(structure, (vc->strands > 1) ? ss[so[1]] : -1); cb((const char *)outstruct, structure_energy, data); free(outstruct); } free(structure); } else { /* get (and remove) next interval of state to analyze */ interval = pop(state->Intervals); scan_interval(vc, interval->i, interval->j, interval->array_flag, threshold, state, env); free_interval_node(interval); /* free the current interval */ } free_state_node(state); /* free the current state */ } /* end of while (1) */ /* cleanup memory */ free(env); } PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env) { /* real backtrack routine */ /* array_flag = 0: trace back in f5-array */ /* array_flag = 1: trace back in fML-array */ /* array_flag = 2: trace back in repeat() */ /* array_flag = 3: trace back in fM1-array */ STATE *new_state, *temp_state; INTERVAL *new_interval; vrna_param_t *P; vrna_md_t *md; register int k, fi, cij, ij; register int type; register int dangle_model; register int noLP; unsigned int *sn, *so, *ss, *se; int element_energy, best_energy; int *fc, *f5, *c, *fML, *fM1, *ggg; int FcH, FcI, FcM, *fM2; int length, *indx, *rtype, circular, with_gquad, turn; char *ptype; short *S1; unsigned char *hard_constraints, hc_decompose; vrna_hc_t *hc; vrna_sc_t *sc; length = vc->length; sn = vc->strand_number; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; indx = vc->jindx; ptype = vc->ptype; S1 = vc->sequence_encoding; P = vc->params; md = &(P->model_details); rtype = &(md->rtype[0]); dangle_model = md->dangles; noLP = md->noLP; circular = md->circ; with_gquad = md->gquad; turn = md->min_loop_size; fc = vc->matrices->fc; f5 = vc->matrices->f5; c = vc->matrices->c; fML = vc->matrices->fML; fM1 = vc->matrices->fM1; ggg = vc->matrices->ggg; FcH = vc->matrices->FcH; FcI = vc->matrices->FcI; FcM = vc->matrices->FcM; fM2 = vc->matrices->fM2; hc = vc->hc; hard_constraints = hc->mx; sc = vc->sc; best_energy = best_attainable_energy(vc, state); /* .. 
on remaining intervals */ env->nopush = true; if ((i > 1) && (!array_flag)) vrna_message_error("Error while backtracking!"); if ((j < i + turn + 1) && ((sn[i] == so[1]) || (sn[j] == so[0]))) { /* minimal structure element */ if (array_flag == 0) /* do not forget to add f5[j], since it may contain pseudo energies from soft constraining */ state->partial_energy += f5[j]; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } ij = indx[j] + i; /* 13131313131313131313131313131313131313131313131313131313131313131313131 */ if (array_flag == 3 || array_flag == 1) { /* array_flag = 3: interval i,j was generated during */ /* a multiloop decomposition using array fM1 in repeat() */ /* or in this block */ /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ if ((hc->up_ml[j]) && (((array_flag == 3) && (fM1[indx[j - 1] + i] != INF)) || (fML[indx[j - 1] + i] != INF))) { if (array_flag == 3) fi = fM1[indx[j - 1] + i] + P->MLbase; else fi = fML[indx[j - 1] + i] + P->MLbase; if (sc) { if (sc->energy_up) fi += sc->energy_up[j][1]; if (sc->f) fi += sc->f(i, j, i, j - 1, VRNA_DECOMP_ML_ML, sc->data); } if ((fi + best_energy <= threshold) && (sn[j - 1] == sn[j])) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, P->MLbase, array_flag, env); } hc_decompose = hard_constraints[length * i + j]; if (hc_decompose & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) { /* i,j may pair */ cij = c[ij]; if (cij != INF) { type = vrna_get_ptype(ij, ptype); switch (dangle_model) { case 0: element_energy = E_MLstem(type, -1, -1, P); break; default: element_energy = E_MLstem(type, (((i > 1) && (sn[i - 1] == sn[i])) || circular) ? S1[i - 1] : -1, (((j < length) && (sn[j] == sn[j + 1])) || circular) ? S1[j + 1] : -1, P); break; } if (sc) { if (sc->f) element_energy += sc->f(i, j, i, j, VRNA_DECOMP_ML_STEM, sc->data); } cij += element_energy; if (cij + best_energy <= threshold) repeat(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } else if ((with_gquad) && (ggg[ij] != INF)) { element_energy = E_MLstem(0, -1, -1, P); cij = ggg[ij] + element_energy; if (cij + best_energy <= threshold) repeat_gquad(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 3 || array_flag == 1 */ /* 11111111111111111111111111111111111111111111111111111111111111111111111 */ if (array_flag == 1) { /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ int stopp, k1j; if ((sn[i - 1] == sn[i]) && (sn[j] == sn[j + 1])) { /*backtrack in FML only if multiloop is possible*/ for (k = i + turn + 1; k <= j - 1 - turn; k++) { /* Multiloop decomposition if i,j contains more than 1 stack */ if ((with_gquad) && (sn[k] == sn[k + 1]) && (fML[indx[k] + i] != INF) && (ggg[indx[j] + k + 1] != INF)) { element_energy = E_MLstem(0, -1, -1, P); if (fML[indx[k] + i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat_gquad(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } k1j = indx[j] + k + 1; if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && (fML[indx[k] + i] != INF) && (c[k1j] != INF)) { short s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[i - 1] == sn[i]) ? 
S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_ML_ML_STEM, sc->data); } if (sn[k] == sn[k + 1]) { if (fML[indx[k] + i] + c[k1j] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } } } } if (vc->strands > 1) { stopp = se[so[0]] - 1; /*if cp -1: k on cut, => no ml*/ stopp = MIN2(stopp, j - 1 - turn); if (i > ss[so[1]]) stopp = j - 1 - turn; else if (i == ss[so[1]]) stopp = 0; /*not a multi loop*/ } else { stopp = j - 1 - turn; } int up = 1; for (k = i; k <= stopp; k++, up++) { if (hc->up_ml[i] >= up) { k1j = indx[j] + k + 1; /* Multiloop decomposition if i,j contains only 1 stack */ if ((with_gquad) && (ggg[k1j] != INF)) { element_energy = E_MLstem(0, -1, -1, P) + P->MLbase * up; if (sc) if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (ggg[k1j] + element_energy + best_energy <= threshold) repeat_gquad(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && (c[k1j] != INF)) { int s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[k - 1] == sn[k]) ? S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); element_energy += P->MLbase * up; if (sc) { if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (sc->f) element_energy += sc->f(i, j, k + 1, j, VRNA_DECOMP_ML_STEM, sc->data); } if (c[k1j] + element_energy + best_energy <= threshold) repeat(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } } } } /* array_flag == 1 */ /* 22222222222222222222222222222222222222222222222222 */ /* */ /* array_flag = 2: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* */ /* 22222222222222222222222222222222222222222222222222 */ if (array_flag == 2) { repeat(vc, i, j, state, 0, 0, best_energy, threshold, env); if (env->nopush) if (!noLP) vrna_message_warning("%d,%d\nOops, no solution in repeat!", i, j); return; } /* 00000000000000000000000000000000000000000000000000 */ /* */ /* array_flag = 0: interval i,j was found while */ /* tracing back through f5-array and c-array */ /* or within this block */ /* */ /* 00000000000000000000000000000000000000000000000000 */ if ((array_flag == 0) && !circular) { int s5, s3, kj, tmp_en; if ((hc->up_ext[j]) && (f5[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (f5[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 0, env); } for (k = j - turn - 1; k > 1; k--) { kj = indx[j] + k; if ((with_gquad) && (sn[k] == sn[j]) && (f5[k - 1] != INF) && (ggg[kj] != INF)) { element_energy = 0; if (f5[k - 1] + ggg[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; /* backtrace the quadruplex */ repeat_gquad(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (f5[k - 1] != INF) 
        && (c[kj] != INF)) {
        type = vrna_get_ptype(kj, ptype);
        /* k and j pair */
        switch (dangle_model) {
          case 0:
            s5 = s3 = -1;
            break;
          default:
            s5  = (sn[k - 1] == sn[k]) ? S1[k - 1] : -1;
            s3  = ((j < length) && (sn[j] == sn[j + 1])) ? S1[j + 1] : -1;
            break;
        }
        element_energy = vrna_E_ext_stem(type, s5, s3, P);
        if (sn[k] != sn[j]) /*&&(state->is_duplex==0))*/
          element_energy += P->DuplexInit;

        /*state->is_duplex=1;*/
        if (sc) {
          if (sc->f)
            element_energy += sc->f(1, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data);
        }

        if (f5[k - 1] + c[kj] + element_energy + best_energy <= threshold) {
          temp_state  = derive_new_state(1, k - 1, state, 0, 0);
          env->nopush = false;
          repeat(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env);
          free_state_node(temp_state);
        }
      }
    }

    kj = indx[j] + 1;

    if ((with_gquad) && (sn[k] == sn[j]) && (ggg[kj] != INF)) {
      element_energy = 0;
      if (ggg[kj] + element_energy + best_energy <= threshold)
        /* backtrace the quadruplex */
        repeat_gquad(vc, 1, j, state, element_energy, 0, best_energy, threshold, env);
    }

    if ((hard_constraints[length + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) {
      type  = vrna_get_ptype(kj, ptype);
      s5    = -1;
      switch (dangle_model) {
        case 0:
          s3 = -1;
          break;
        default:
          s3 = (j < length) && (sn[j] == sn[j + 1]) ? S1[j + 1] : -1;
          break;
      }
      element_energy = vrna_E_ext_stem(type, s5, s3, P);
      if (sn[1] != sn[j])
        element_energy += P->DuplexInit;

      if (sc) {
        if (sc->f)
          element_energy += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_STEM, sc->data);
      }

      if (c[kj] + element_energy + best_energy <= threshold)
        repeat(vc, 1, j, state, element_energy, 0, best_energy, threshold, env);
    }
  } /* end array_flag == 0 && !circular */
  /* or do we subopt circular? */
  else if (array_flag == 0) {
    int k, l, p, q, tmp_en;

    /* if we've done everything right, we will never reach this case more than once */
    /* right after the initialization of the stack with ([1,n], empty, 0) */
    /* let's check if we can have an open chain without breaking the threshold */
    /* this is an ugly workaround because in case of an open chain we do not have to */
    /* backtrack anything further... */
    if (hc->up_ext[1] >= length) {
      tmp_en = 0;
      if (sc) {
        if (sc->energy_up)
          tmp_en += sc->energy_up[1][length];

        if (sc->f)
          tmp_en += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_UP, sc->data);
      }

      if (tmp_en <= threshold) {
        new_state                 = derive_new_state(1, 2, state, 0, 0);
        new_state->partial_energy = 0;
        push(env->Stack, new_state);
        env->nopush = false;
      }
    }

    /* OK, let's check if we can do an exterior hairpin without breaking the threshold */
    /* best energy should be 0 if we are here */
    if (FcH + best_energy <= threshold) {
      /* let's search for all exterior hairpin cases that fit into our threshold barrier */
      /* we use index k,l to avoid confusion with the i,j index of our state... */
      /* if we reach here, i should be 1 and j should be n respectively */
      for (k = i; k < j; k++) {
        if (hc->up_hp[1] < k)
          break;

        for (l = j; l >= k + turn + 1; l--) {
          int kl, tmpE;
          kl = indx[l] + k;
          if (c[kl] != INF) {
            tmpE = vrna_E_hp_loop(vc, l, k);
            if (c[kl] + tmpE + best_energy <= threshold) {
              /* what we really have to do is something like this, isn't it? */
              /* we have to create a new state, with interval [k,l], then we */
              /* add our loop energy as initial energy of this state and put */
              /* the state onto the stack R... for further refinement... */
              /* we also denote this new interval to be scanned in C */
              fork_state(k, l, state, tmpE, 2, env);
            }
          }
        }
      }
    }

    /* now let's see if we can do an exterior interior loop without breaking the threshold */
    if (FcI + best_energy <= threshold) {
      /* now we search for our exterior interior loop possibilities */
      for (k = i; k < j; k++) {
        for (l = j; l >= k + turn + 1; l--) {
          int kl, type, tmpE;
          kl = indx[l] + k; /* just confusing these indices ;-) */
          if ((hard_constraints[length * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[kl] != INF)) {
            type = rtype[vrna_get_ptype(kl, ptype)];
            for (p = l + 1; p < j; p++) {
              int u1, qmin;
              u1 = p - l - 1;
              if (u1 + k - 1 > MAXLOOP)
                break;

              if (hc->up_int[l + 1] < u1)
                break;

              qmin = u1 + k - 1 + j - MAXLOOP;
              if (qmin < p + turn + 1)
                qmin = p + turn + 1;

              for (q = j; q >= qmin; q--) {
                int u2, type_2;
                if (hc->up_int[q + 1] < (j - q + k - 1))
                  break;

                if ((hard_constraints[length * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[indx[q] + p] != INF)) {
                  type_2  = rtype[vrna_get_ptype(indx[q] + p, ptype)];
                  u2      = k - 1 + j - q;
                  if (u1 + u2 > MAXLOOP)
                    continue;

                  tmpE = E_IntLoop(u1, u2, type, type_2,
                                   S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], P);
                  if (sc) {
                    if (sc->energy_up)
                      tmpE += sc->energy_up[l + 1][p - l - 1] +
                              sc->energy_up[q + 1][j - q] +
                              sc->energy_up[1][k - 1];

                    if (sc->energy_stack) {
                      if (u1 + u2 == 0) {
                        tmpE += sc->energy_stack[k] +
                                sc->energy_stack[l] +
                                sc->energy_stack[p] +
                                sc->energy_stack[q];
                      }
                    }
                  }

                  if (c[kl] + c[indx[q] + p] + tmpE + best_energy <= threshold) {
                    /* ok, similar to the hairpin stuff, we add new states onto the stack R */
                    /* but in contrast to the hairpin decomposition, we have to add two new */
                    /* intervals, enclosed by k,l and p,q respectively and we also have to */
                    /* add the partial energy that comes from the exterior interior loop */
                    fork_two_states(k, l, p, q, state, tmpE, 2, 2, env);
                  }
                }
              }
            }
          }
        }
      }
    }

    /* and last but not least, we check whether we can do an exterior multiloop within the energy threshold */
    if (FcM <= threshold) {
      /* this decomposition will be somewhat more complicated... so let's see what we do here... */
      /* first we want to find out which split indices we can use without exceeding the threshold */
      int tmpE2;
      for (k = turn + 1; k < j - 2 * turn; k++) {
        if ((fML[indx[k] + 1] != INF) && (fM2[k + 1] != INF)) {
          tmpE2 = fML[indx[k] + 1] + fM2[k + 1] + P->MLclosing;
          if (tmpE2 + best_energy <= threshold) {
            /* grmpfh, we have found a possible split index k, so we have to split fM2 and fML now */
            /* let's do it first in fM2 anyway */
            for (l = k + turn + 2; l < j - turn - 1; l++) {
              tmpE2 = fM1[indx[l] + k + 1] + fM1[indx[j] + l + 1];
              if (tmpE2 + fML[indx[k] + 1] + P->MLclosing <= threshold) {
                /* we've (hopefully) found a valid decomposition of fM2 and therefore we have all */
                /* three intervals for our new state to be pushed on stack R */
                new_state = copy_state(state);

                /* the first interval leads to a search in the fML array */
                new_interval = make_interval(1, k, 1);
                push(new_state->Intervals, new_interval);
                env->nopush = false;

                /* next, we have the first interval that has to be traced in fM1 */
                new_interval = make_interval(k + 1, l, 3);
                push(new_state->Intervals, new_interval);
                env->nopush = false;

                /* and the last of our three intervals is also one to be traced within the fM1 array... */
                new_interval = make_interval(l + 1, j, 3);
                push(new_state->Intervals, new_interval);
                env->nopush = false;

                /* mmh, we add the energy for closing the multiloop now...
*/ new_state->partial_energy += P->MLclosing; /* next we push our state onto the R stack */ push(env->Stack, new_state); env->nopush = false; } /* else we search further... */ } /* ok, we have to decompose fML now... */ } } } } } /* thats all folks for the circular case... */ /* 44444444444444444444444444444444444444444444444444 */ /* */ /* array_flag = 4: interval i,j was found while */ /* tracing back through fc-array smaller than than cp */ /* or within this block */ /* */ /* 44444444444444444444444444444444444444444444444444 */ if (array_flag == 4) { int ik, s5, s3, tmp_en; if ((hc->up_ext[i]) && (fc[i + 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[i][1]; if (sc->f) tmp_en += sc->f(i, j, i + 1, j, VRNA_DECOMP_EXT_EXT, sc->data); } if (fc[i + 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 5'-end */ fork_state(i + 1, j, state, tmp_en, 4, env); } for (k = i + turn + 1; k < j; k++) { ik = indx[k] + i; if ((with_gquad) && (fc[k + 1] != INF) && (ggg[ik] != INF)) { if (fc[k + 1] + ggg[ik] + best_energy <= threshold) { temp_state = derive_new_state(k + 1, j, state, 0, 4); env->nopush = false; repeat_gquad(vc, i, k, temp_state, 0, fc[k + 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * i + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[k + 1] != INF) && (c[ik] != INF)) { type = vrna_get_ptype(ik, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (i > 1) ? S1[i - 1] : -1; s3 = S1[k + 1]; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_EXT_STEM_EXT, sc->data); } if (fc[k + 1] + c[ik] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(k + 1, j, state, 0, 4); env->nopush = false; repeat(vc, i, k, temp_state, element_energy, fc[k + 1], best_energy, threshold, env); free_state_node(temp_state); } } } ik = indx[se[so[0]]] + i; /* indx[j] + i; */ if ((with_gquad) && (ggg[ik] != INF)) if (ggg[ik] + best_energy <= threshold) repeat_gquad(vc, i, se[so[0]], state, 0, 0, best_energy, threshold, env); if ((hard_constraints[length * i + se[so[0]]] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[ik] != INF)) { type = vrna_get_ptype(ik, ptype); s3 = -1; switch (dangle_model) { case 0: s5 = -1; break; default: s5 = (i > 1) ? 
S1[i - 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, se[so[0]], i, se[so[0]], VRNA_DECOMP_EXT_STEM, sc->data); } if (c[ik] + element_energy + best_energy <= threshold) repeat(vc, i, se[so[0]], state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 4 */ /* 55555555555555555555555555555555555555555555555555 */ /* */ /* array_flag = 5: interval cp=i,j was found while */ /* tracing back through fc-array greater than cp */ /* or within this block */ /* */ /* 55555555555555555555555555555555555555555555555555 */ if (array_flag == 5) { int kj, s5, s3, tmp_en; if ((hc->up_ext[j]) && (fc[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(i, j, i, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (fc[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 5, env); } for (k = j - turn - 1; k > i; k--) { kj = indx[j] + k; if ((with_gquad) && (fc[k - 1] != INF) && (ggg[kj] != INF)) { if (fc[k - 1] + ggg[kj] + best_energy <= threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat_gquad(vc, k, j, temp_state, 0, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[k - 1] != INF) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); element_energy = 0; switch (dangle_model) { case 0: s3 = s5 = -1; break; default: s5 = S1[k - 1]; s3 = (j < length) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data); } if (fc[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat(vc, k, j, temp_state, element_energy, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } } kj = indx[j] + ss[so[1]]; /* indx[j] + i; */ if ((with_gquad) && (ggg[kj] != INF)) if (ggg[kj] + best_energy <= threshold) repeat_gquad(vc, ss[so[1]], j, state, 0, 0, best_energy, threshold, env); if ((hard_constraints[length * ss[so[1]] + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) ? 
S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(ss[so[1]], j, ss[so[1]], j, VRNA_DECOMP_EXT_STEM, sc->data); } if (c[kj] + element_energy + best_energy <= threshold) repeat(vc, ss[so[1]], j, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 5 */ if (array_flag == 6) { /* we have a gquad */ repeat_gquad(vc, i, j, state, 0, 0, best_energy, threshold, env); if (env->nopush) vrna_message_warning("%d,%d\nOops, no solution in gquad-repeat!", i, j); return; } if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } /*---------------------------------------------------------------------------*/ PRIVATE void repeat_gquad(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env) { unsigned int *sn; int *ggg, *indx, element_energy; short *S1; vrna_param_t *P; indx = vc->jindx; sn = vc->strand_number; ggg = vc->matrices->ggg; S1 = vc->sequence_encoding; P = vc->params; /* find all gquads that fit into the energy range and the interval [i,j] */ STATE *new_state; best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (sn[i] == sn[j]) { element_energy = ggg[indx[j] + i]; if ((element_energy != INF) && (element_energy + best_energy <= threshold)) { int cnt; int *L; int *l; /* find out how many gquads we might expect in the interval [i,j] */ int num_gquads = get_gquad_count(S1, i, j); num_gquads++; L = (int *)vrna_alloc(sizeof(int) * num_gquads); l = (int *)vrna_alloc(sizeof(int) * num_gquads * 3); L[0] = -1; get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy); for (cnt = 0; L[cnt] != -1; cnt++) { new_state = copy_state(state); make_gquad(i, L[cnt], &(l[3 * cnt]), new_state); new_state->partial_energy += part_energy; new_state->partial_energy += element_energy; /* new_state->best_energy = * hairpin[unpaired] + element_energy + best_energy; */ push(env->Stack, new_state); env->nopush = false; } free(L); free(l); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void repeat(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env) { /* routine to find stacks, bulges, internal loops and multiloops */ /* within interval closed by basepair i,j */ STATE *new_state; vrna_param_t *P; vrna_md_t *md; register int ij, k, p, q, energy, new; register int mm; register int no_close, type, type_2; char *ptype; unsigned int n, *sn, *so, *ss, *se; int element_energy; int *fc, *c, *fML, *fM1, *ggg; int rt, *indx, *rtype, noGUclosure, noLP, with_gquad, dangle_model, turn; short *S1; vrna_hc_t *hc; vrna_sc_t *sc; n = vc->length; S1 = vc->sequence_encoding; ptype = vc->ptype; indx = vc->jindx; sn = vc->strand_number; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; P = vc->params; md = &(P->model_details); rtype = &(md->rtype[0]); noGUclosure = md->noGUclosure; noLP = md->noLP; with_gquad = md->gquad; dangle_model = md->dangles; turn = md->min_loop_size; fc = vc->matrices->fc; c = vc->matrices->c; fML = vc->matrices->fML; fM1 = vc->matrices->fM1; ggg = vc->matrices->ggg; hc = vc->hc; sc = vc->sc; ij = indx[j] + i; type = vrna_get_ptype(ij, ptype); /* * if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j); */ no_close = (((type == 3) || (type == 4)) && noGUclosure); if (hc->mx[n * i + 
j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { if (noLP) { /* always consider the structure with additional stack */ if (i + turn + 2 < j) { if (hc->mx[n * (i + 1) + j - 1] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) { type_2 = rtype[vrna_get_ptype(indx[j - 1] + i + 1, ptype)]; energy = 0; if ((sn[i] == sn[i + 1]) && (sn[j - 1] == sn[j])) { energy = E_IntLoop(0, 0, type, type_2, S1[i + 1], S1[j - 1], S1[i + 1], S1[j - 1], P); if (sc) { if (sc->energy_bp) energy += sc->energy_bp[ij]; if (sc->energy_stack) { energy += sc->energy_stack[i] + sc->energy_stack[i + 1] + sc->energy_stack[j - 1] + sc->energy_stack[j]; } if (sc->f) energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_IL, sc->data); } new_state = derive_new_state(i + 1, j - 1, state, part_energy + energy, 2); make_pair(i, j, new_state); make_pair(i + 1, j - 1, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; if (i == 1 || state->structure[i - 2] != '(' || state->structure[j] != ')') /* adding a stack is the only possible structure */ return; } } } } } best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { for (p = i + 1; p <= MIN2(j - 2 - turn, i + MAXLOOP + 1); p++) { int minq = j - i + p - MAXLOOP - 2; if (minq < p + 1 + turn) minq = p + 1 + turn; if (hc->up_int[i + 1] < (p - i - 1)) break; for (q = j - 1; q >= minq; q--) { if (hc->up_int[q + 1] < (j - q - 1)) break; /* skip stack if noLP, since we've already processed it above */ if ((noLP) && (p == i + 1) && (q == j - 1)) continue; if (!(hc->mx[n * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC)) continue; if (c[indx[q] + p] == INF) continue; type_2 = vrna_get_ptype(indx[q] + p, ptype); if (noGUclosure) if (no_close || (type_2 == 3) || (type_2 == 4)) if ((p > i + 1) || (q < j - 1)) continue; /* continue unless stack */ if ((sn[i] == sn[p]) && (sn[q] == sn[j])) { energy = E_IntLoop(p - i - 1, j - q - 1, type, rtype[type_2], S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P); new = energy + c[indx[q] + p]; if (sc) { if (sc->energy_up) energy += sc->energy_up[i + 1][p - i - 1] + sc->energy_up[q + 1][j - q - 1]; if (sc->energy_bp) energy += sc->energy_bp[ij]; if (sc->energy_stack) { if ((p == i + 1) && (q == j - 1)) { energy += sc->energy_stack[i] + sc->energy_stack[p] + sc->energy_stack[q] + sc->energy_stack[j]; } } if (sc->f) energy += sc->f(i, j, p, q, VRNA_DECOMP_PAIR_IL, sc->data); } new = energy + c[indx[q] + p]; if (new + best_energy <= threshold) /* stack, bulge, or interior loop */ fork_int_state(i, j, p, q, state, part_energy + energy, env); } /*end of if block */ } /* end of q-loop */ } /* end of p-loop */ } if (sn[i] != sn[j]) { /*look in fc*/ if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[i + 1] != INF) && (fc[j - 1] != INF)) { rt = rtype[type]; element_energy = 0; switch (dangle_model) { case 0: element_energy = vrna_E_ext_stem(rt, -1, -1, P); break; default: element_energy = vrna_E_ext_stem(rt, (sn[j - 1] == sn[j]) ? S1[j - 1] : -1, (sn[i] == sn[i + 1]) ? 
S1[i + 1] : -1, P); break; } if (fc[i + 1] + fc[j - 1] + element_energy + best_energy <= threshold) fork_two_states_pair(i, j, ss[so[1]], state, part_energy + element_energy, 4, 5, env); } } mm = P->MLclosing; rt = rtype[type]; if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) && ((vc->strands < 2) || ((i != se[so[0]]) && (j != ss[so[1]])))) { element_energy = mm; switch (dangle_model) { case 0: element_energy = E_MLstem(rt, -1, -1, P) + mm; break; default: element_energy = E_MLstem(rt, S1[j - 1], S1[i + 1], P) + mm; break; } if (sc) { if (sc->energy_bp) element_energy += sc->energy_bp[ij]; if (sc->f) element_energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_ML, sc->data); } /* multiloop decomposition */ if ((sc) && (sc->f)) { for (k = i + turn + 2; k <= j - turn - 2; k++) { int eee = fML[indx[k - 1] + i + 1]; if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) { eee += fM1[indx[j - 1] + k] + best_energy; int aux_eee = element_energy + sc->f(i + 1, j - 1, k - 1, k, VRNA_DECOMP_ML_ML_ML, sc->data); if ((eee + aux_eee) <= threshold) fork_two_states_pair(i, j, k, state, part_energy + aux_eee, 1, 3, env); } } } else { for (k = i + turn + 2; k <= j - turn - 2; k++) { int eee = fML[indx[k - 1] + i + 1]; if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) { /* multiloop decomposition */ if ((eee + fM1[indx[j - 1] + k] + element_energy + best_energy) <= threshold) fork_two_states_pair(i, j, k, state, part_energy + element_energy, 1, 3, env); } } } } if (sn[i] == sn[j]) { if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) && (!no_close)) { element_energy = vrna_E_hp_loop(vc, i, j); if (element_energy != INF) { if (element_energy + best_energy <= threshold) /* hairpin structure */ fork_state_pair(i, j, state, part_energy + element_energy, env); } } if (with_gquad) { /* now we have to find all loops where (i,j) encloses a gquad in an interior loops style */ int cnt, *p, *q, *en, tmp_en; p = q = en = NULL; en = E_GQuad_IntLoop_exhaustive(i, j, &p, &q, type, S1, ggg, threshold - best_energy, indx, P); for (cnt = 0; p[cnt] != -1; cnt++) { if ((hc->up_int[i + 1] >= p[cnt] - i - 1) && (hc->up_int[q[cnt] + 1] >= j - q[cnt] - 1)) { tmp_en = en[cnt]; if (sc) { if (sc->energy_bp) tmp_en += sc->energy_bp[ij]; if (sc->energy_up) tmp_en += sc->energy_up[i + 1][p[cnt] - i - 1] + sc->energy_up[q[cnt] + 1][j - q[cnt] - 1]; } new_state = derive_new_state(p[cnt], q[cnt], state, tmp_en + part_energy, 6); make_pair(i, j, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; } } free(en); free(p); free(q); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void old_subopt_print(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; if (structure && d->fp) { char *e_string = vrna_strdup_printf(" %6.2f", energy); print_structure(d->fp, structure, e_string); free(e_string); } } PRIVATE void old_subopt_store(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; /* store solution */ if (d->n_sol + 1 == d->max_sol) { d->max_sol *= 2; d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION)); } if (structure) { d->SolutionList[d->n_sol].energy = energy; d->SolutionList[d->n_sol++].structure = strdup(structure); } else { d->SolutionList[d->n_sol].energy = 0; d->SolutionList[d->n_sol++].structure = NULL; } } PRIVATE void old_subopt_store_compressed(const char *structure, float energy, void 
*data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; /* store solution */ if (d->n_sol + 1 == d->max_sol) { d->max_sol *= 2; d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION)); } if (structure) { d->SolutionList[d->n_sol].energy = energy; if (d->cp > 0) { int cp = d->cp; char *s = vrna_cut_point_remove(structure, &cp); d->SolutionList[d->n_sol++].structure = vrna_db_pack(s); free(s); } else { d->SolutionList[d->n_sol++].structure = vrna_db_pack(structure); } } else { d->SolutionList[d->n_sol].energy = 0; d->SolutionList[d->n_sol++].structure = NULL; } } /*###########################################*/ /*# deprecated functions below #*/ /*###########################################*/ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PUBLIC SOLUTION * subopt(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 0, fp); } PUBLIC SOLUTION * subopt_circ(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 1, fp); } PUBLIC SOLUTION * subopt_par(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { return wrap_subopt(seq, structure, parameters, delta, is_constrained, is_circular, fp); } PRIVATE SOLUTION * wrap_subopt(char *string, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { vrna_fold_compound_t *vc; vrna_param_t *P; char *seq; #ifdef _OPENMP /* Explicitly turn off dynamic threads */ omp_set_dynamic(0); #endif /* we need the parameter structure for hard constraints */ if (parameters) { P = vrna_params_copy(parameters); } else { vrna_md_t md; set_model_details(&md); md.temperature = temperature; P = vrna_params(&md); } P->model_details.circ = is_circular; P->model_details.uniq_ML = uniq_ML = 1; /* what about cofold sequences here? Is it safe to call the below cut_point_insert() ? */ /* dirty hack to reinsert the '&' according to the global variable 'cut_point' */ seq = vrna_cut_point_insert(string, cut_point); vc = vrna_fold_compound(seq, &(P->model_details), ((is_circular == 0) ? VRNA_OPTION_HYBRID : VRNA_OPTION_DEFAULT)); if (parameters) { /* replace params if necessary */ free(vc->params); vc->params = P; } else { free(P); } /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */ if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK | VRNA_CONSTRAINT_DB_INTRAMOL | VRNA_CONSTRAINT_DB_INTERMOL; vrna_constraints_add(vc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; /* cleanup */ free(seq); return vrna_subopt(vc, delta, subopt_sorted, fp); } #endif /*---------------------------------------------------------------------------*/ /* Well, that is the end!----------------------------------------------------*/ /*---------------------------------------------------------------------------*/
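/*
 * Illustrative usage sketch (an addition for this document, not part of the
 * original subopt.c): driving the backtracking routines above through the
 * public entry point. This assumes the ViennaRNA 2.x API
 * (vrna_fold_compound(), vrna_subopt()); the sequence and the energy band
 * of 500 * 0.01 kcal/mol are arbitrary example values. Guarded by #if 0 so
 * the file still compiles as a library source.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <ViennaRNA/fold_compound.h>
#include <ViennaRNA/subopt.h>

int
main(void)
{
  const char *seq = "GGGCUAUUAGCUCAGUUGGUUAGAGC";
  /* default model details, single-sequence (intramolecular) folding */
  vrna_fold_compound_t *vc = vrna_fold_compound(seq, NULL, VRNA_OPTION_DEFAULT);
  /* all structures within 5 kcal/mol of the MFE, sorted by energy */
  vrna_subopt_solution_t *sol = vrna_subopt(vc, 500, 1, NULL);

  for (int s = 0; sol[s].structure != NULL; s++) {
    printf("%s %6.2f\n", sol[s].structure, sol[s].energy);
    free(sol[s].structure);
  }

  free(sol);
  vrna_fold_compound_free(vc);
  return 0;
}
#endif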
omp_zhemm_batch.c
/** * @file omp_zhemm_batch.c * * @brief BBLAS omp_zhemm_batch double _Complex routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @precisions normal z -> c **/ #endif #include <cblas.h> #include "bblas_omp.h" #include "bblas.h" #include <omp.h> #define COMPLEX /** Purpose ------- <b>omp_zhemm_batch</b> is an OpenMP implementation of the batched zhemm operation. It performs the matrix-matrix operations arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i], or arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i], where alpha[i] and beta[i] are scalars, arrayA[i] is a Hermitian matrix, and arrayB[i] and arrayC[i] are M[i] by N[i] matrices. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) need only have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of side[0], uplo[0], M[0], N[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations. Parameters ---------- @param[in] side Array of <tt>enum BBLAS_SIDE</tt>. On entry, side[i] specifies whether the Hermitian matrix arrayA[i] appears on the left or right in the operation as follows: - = 'BblasLeft' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i]. - = 'BblasRight' arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i]. @param[in] uplo Array of <tt>enum BBLAS_UPLO</tt>. On entry, uplo[i] specifies whether the upper or lower triangular part of the Hermitian matrix arrayA[i] is to be referenced as follows: - = 'BblasUpper' Only the upper triangular part of the Hermitian matrix is to be referenced. - = 'BblasLower' Only the lower triangular part of the Hermitian matrix is to be referenced. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix arrayC[i]. M[i] must be at least zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix arrayC[i]. N[i] must be at least zero. @param[in] alpha Array of <tt>complex_16</tt>. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a COMPLEX_16 matrix of dimension lda[i] by Ka[i], where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise. When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i] must contain the Hermitian matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the Hermitian matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the Hermitian matrix whilst the strictly upper triangular part is not used. 
When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must contain the Hermitian matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the Hermitian matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the Hermitian matrix whilst the strictly upper triangular part is not used. Note that the imaginary parts of the diagonal elements need not be set; they are assumed to be zero. @param[in] lda Array of <tt>int</tt>. On entry, lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When side[i] = BblasLeft then lda[i] must be at least max( 1, M[i] ), otherwise lda[i] must be at least max( 1, N[i] ). @param[in] arrayB Array of pointers. Each element arrayB[i] is a pointer to a COMPLEX_16 matrix of dimension ldb[i] by N[i]. The leading M[i] by N[i] part of arrayB[i] must contain the matrix elements. @param[in] ldb Array of <tt>int</tt>. Each element ldb[i] specifies the first dimension of arrayB[i] as declared in the calling (sub) program. Each element ldb[i] must be at least max( 1, M[i] ). @param[in] beta Array of <tt>complex_16</tt>. When beta[i] is set to zero arrayC[i] need not be set on input. @param[in,out] arrayC Array of pointers. Each element arrayC[i] is a pointer to a COMPLEX_16 matrix of dimension ldc[i] by N[i]. Before entry, the leading M[i] by N[i] part of arrayC[i] must contain the matrix C, except when beta[i] is zero, in which case arrayC[i] need not be set on entry. On exit, the matrix arrayC[i] is overwritten by the M[i] by N[i] matrix output. @param[in] ldc Array of <tt>int</tt>. Each element ldc[i] specifies the first dimension of arrayC[i] as declared in the calling (sub) program. The value ldc[i] must be at least max( 1, M[i] ). @param[in] batch_count <tt>int</tt> The number of matrices to operate on. @param[in] batch_opts <tt>enum BBLAS_OPTS</tt> One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of batch operation required. @param[out] info Array of <tt>int</tt>. Each element info[i] is the error return code of the ith zhemm in the batch; these need not be set on entry. The error codes can be found in bblas_macros.h. 
**/ void omp_zhemm_batch( const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo, const int *M, const int *N, const BBLAS_Complex64_t *alpha, const BBLAS_Complex64_t **arrayA, const int *lda, const BBLAS_Complex64_t **arrayB, const int *ldb, const BBLAS_Complex64_t *beta, BBLAS_Complex64_t **arrayC, const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info) { /* Local variables */ int first_index = 0; int batch_iter; int LDA; char func_name[15] = "zhemm_batch"; /* Check input arguments */ if (batch_count < 0) { xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1); } if (batch_opts == BBLAS_FIXED) { if ((side[first_index] != BblasLeft) && (side[first_index] != BblasRight)) { xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_SIDE; } return; } if ((uplo[first_index] != BblasUpper) && (uplo[first_index] != BblasLower)) { xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_UPLO; } return; } if (M[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_M, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_M; } return; } if (N[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_N; } return; } if (side[first_index] == BblasLeft) { LDA = M[first_index]; } else { LDA = N[first_index]; } if (lda[first_index] < max(1, LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDA; } return; } if (ldb[first_index] < max(1, M[first_index])) { xerbla_batch(func_name, BBLAS_ERR_LDB, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDB; } return; } if (ldc[first_index] < max(1, M[first_index])) { xerbla_batch(func_name, BBLAS_ERR_LDC, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDC; } return; } /* particular case */ if (M[first_index] == 0 || N[first_index] == 0 || ((alpha[first_index] == (BBLAS_Complex64_t)0.0) && (beta[first_index] == (BBLAS_Complex64_t)1.0))) { for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_SUCCESS; } return; } #pragma omp parallel for private(batch_iter) for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { /* Call to cblas_zhemm */ cblas_zhemm( BblasColMajor, side[first_index], uplo[first_index], M[first_index], N[first_index], CBLAS_SADDR(alpha[first_index]), arrayA[batch_iter], lda[first_index], arrayB[batch_iter], ldb[first_index], CBLAS_SADDR(beta[first_index]), arrayC[batch_iter], ldc[first_index]); info[batch_iter] = BBLAS_SUCCESS; } } else if (batch_opts == BBLAS_VARIABLE) { #pragma omp parallel for private(batch_iter, LDA) for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { /* Check input arguments */ if ((side[batch_iter] != BblasLeft) && (side[batch_iter] != BblasRight)) { xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter); info[batch_iter] = BBLAS_ERR_SIDE; continue; } if ((uplo[batch_iter] != BblasUpper) && (uplo[batch_iter] != BblasLower)) { xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter); info[batch_iter] = BBLAS_ERR_UPLO; continue; } if (M[batch_iter] < 0) { xerbla_batch(func_name, BBLAS_ERR_M, batch_iter); info[batch_iter] = 
BBLAS_ERR_M; continue; } if (N[batch_iter] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, batch_iter); info[batch_iter] = BBLAS_ERR_N; continue; } if (side[batch_iter] == BblasLeft) { LDA = M[batch_iter]; } else { LDA = N[batch_iter]; } if (lda[batch_iter] < max(1, LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter); info[batch_iter] = BBLAS_ERR_LDA; continue; } if (ldb[batch_iter] < max(1, M[batch_iter])) { xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter); info[batch_iter] = BBLAS_ERR_LDB; continue; } if (ldc[batch_iter] < max(1, M[batch_iter])) { xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter); info[batch_iter] = BBLAS_ERR_LDC; continue; } /* particular case */ if (M[batch_iter] == 0 || N[batch_iter] == 0 || ((alpha[batch_iter] == (BBLAS_Complex64_t)0.0) && beta[batch_iter] == (BBLAS_Complex64_t)1.0)) { info[batch_iter] = BBLAS_SUCCESS; continue; } cblas_zhemm( BblasColMajor, side[batch_iter], uplo[batch_iter], M[batch_iter], N[batch_iter], CBLAS_SADDR(alpha[batch_iter]), arrayA[batch_iter], lda[batch_iter], arrayB[batch_iter], ldb[batch_iter], CBLAS_SADDR(beta[batch_iter]), arrayC[batch_iter], ldc[batch_iter]); /* Successful */ info[batch_iter] = BBLAS_SUCCESS; } }else { xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1); } } #undef COMPLEX
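/*
 * Illustrative calling sketch (an addition for this document, not part of
 * the original file): a BBLAS_FIXED batch, where only element [0] of each
 * parameter array is read and the same side/uplo/sizes apply to every
 * problem. The helper name and the 64x64 sizes are arbitrary; allocation of
 * the matrix pointer arrays is assumed to be done by the caller. Guarded by
 * #if 0 so the file still compiles unchanged.
 */
#if 0
static void
example_zhemm_fixed_batch(const BBLAS_Complex64_t **arrayA,
                          const BBLAS_Complex64_t **arrayB,
                          BBLAS_Complex64_t **arrayC,
                          int batch_count, int *info)
{
    enum BBLAS_SIDE side[1] = { BblasLeft };
    enum BBLAS_UPLO uplo[1] = { BblasLower };
    int M[1]   = { 64 };
    int N[1]   = { 64 };
    int lda[1] = { 64 };
    int ldb[1] = { 64 };
    int ldc[1] = { 64 };
    BBLAS_Complex64_t alpha[1] = { 1.0 };
    BBLAS_Complex64_t beta[1]  = { 0.0 };

    /* arrayC[i] = alpha[0]*arrayA[i]*arrayB[i] for every i < batch_count */
    omp_zhemm_batch(side, uplo, M, N, alpha, arrayA, lda, arrayB, ldb,
                    beta, arrayC, ldc, batch_count, BBLAS_FIXED, info);
}
#endif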
cancel-1.c
/* { dg-do compile } */ /* { dg-options "-fopenmp" } */ void f1 (void) { #pragma omp cancel parallel /* { dg-error "orphaned" } */ #pragma omp cancel for /* { dg-error "orphaned" } */ #pragma omp cancel sections /* { dg-error "orphaned" } */ #pragma omp cancel taskgroup /* { dg-error "orphaned" } */ #pragma omp cancellation point parallel /* { dg-error "orphaned" } */ #pragma omp cancellation point for /* { dg-error "orphaned" } */ #pragma omp cancellation point sections /* { dg-error "orphaned" } */ #pragma omp cancellation point taskgroup /* { dg-error "orphaned" } */ } void f2 (void) { int i, j = 0; #pragma omp parallel { #pragma omp cancel parallel #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp master { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp single { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp critical { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp taskgroup { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not 
closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp task { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. region" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. region" } */ } #pragma omp taskgroup #pragma omp task { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup } #pragma omp taskgroup { #pragma omp task { #pragma omp task { #pragma omp cancellation point taskgroup #pragma omp cancel taskgroup } } } #pragma omp taskgroup { #pragma omp parallel { #pragma omp task { #pragma omp cancel taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. region" } */ #pragma omp cancellation point taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. region" } */ } } #pragma omp target { #pragma omp task { #pragma omp cancel taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. region" } */ #pragma omp cancellation point taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. region" } */ } } #pragma omp target #pragma omp teams #pragma omp distribute for (i = 0; i < 10; i++) { #pragma omp task { #pragma omp cancel taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. region" } */ #pragma omp cancellation point taskgroup /* { dg-error "construct not closely nested inside of .taskgroup. 
region" } */ } } #pragma omp target data map(i) { #pragma omp task { #pragma omp cancel taskgroup #pragma omp cancellation point taskgroup } } } #pragma omp for for (i = 0; i < 10; i++) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup/* { dg-error "not closely nested inside" } */ } #pragma omp for ordered for (i = 0; i < 10; i++) #pragma omp ordered { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup/* { dg-error "not closely nested inside" } */ } #pragma omp sections { { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections #pragma omp cancellation point taskgroup/* { dg-error "not closely nested inside" } */ } #pragma omp section { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections #pragma omp cancellation point taskgroup/* { dg-error "not closely nested inside" } */ } } #pragma omp target data map(j) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp target { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not 
closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } } #pragma omp target data map(j) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp target { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp target teams { #pragma omp cancel parallel /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */ #pragma omp cancel for /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */ #pragma omp cancel sections /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */ #pragma omp cancel taskgroup /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */ #pragma omp cancellation point parallel /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */ #pragma omp cancellation point for /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */ #pragma omp cancellation point sections /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */ #pragma omp cancellation point taskgroup /* { dg-error "only .distribute. or .parallel. 
regions are allowed to be strictly nested" } */ } #pragma omp target teams distribute for (i = 0; i < 10; i++) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp for for (i = 0; i < 10; i++) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp for for (i = 0; i < 10; i++) #pragma omp target data map(j) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp for for (i = 0; i < 10; i++) #pragma omp target { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp for ordered for (i = 0; i < 10; i++) #pragma omp ordered #pragma omp target data map(j) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup/* { dg-error "not closely nested inside" } */ } #pragma omp for ordered for (i = 0; i < 10; 
i++) #pragma omp ordered #pragma omp target { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup/* { dg-error "not closely nested inside" } */ } #pragma omp sections { { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp section { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } } #pragma omp sections { #pragma omp target data map(j) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp section #pragma omp target data map(j) { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } } #pragma omp sections { #pragma omp target { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested 
inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } #pragma omp section #pragma omp target { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } } #pragma omp task { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup #pragma omp taskgroup { #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancel for /* { dg-error "not closely nested inside" } */ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */ } } } void f3 (void) { int i; #pragma omp for nowait for (i = 0; i < 10; i++) { #pragma omp cancel for /* { dg-warning "nowait" } */ } #pragma omp sections nowait { { #pragma omp cancel sections /* { dg-warning "nowait" } */ } #pragma omp section { #pragma omp cancel sections /* { dg-warning "nowait" } */ } } #pragma omp for ordered for (i = 0; i < 10; i++) { #pragma omp cancel for /* { dg-warning "ordered" } */ #pragma omp ordered { } } } #pragma omp cancellation point /* { dg-error "expected declaration specifiers before end of line" } */ void f4 (void) { if (0) #pragma omp cancellation EKAHI /* { dg-error "expected .point. before .EKAHI." } */ ; #pragma omp cancellation HO OKAHI /* { dg-error "expected .point. before .HO." } */ if (0) #pragma omp cancellation point /* { dg-error ".pragma omp cancellation point. may only be used in compound statements" } */ ; #pragma omp cancellation point /* { dg-error ".pragma omp cancellation point. must specify one of" } */ }
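/* For contrast with the erroneous nestings diagnosed above, a minimal sketch
   of well-formed cancellation (an illustrative addition, not part of the
   original testcase): `cancel parallel` closely nested in its parallel
   region, and `cancel for` closely nested in its worksharing loop. Guarded
   by #if 0 so the dg-error expectations of the test are unaffected. */
#if 0
void
f5 (int bad)
{
  int i;
  #pragma omp parallel
  {
    #pragma omp cancellation point parallel
    if (bad)
      {
        #pragma omp cancel parallel
      }
  }
  #pragma omp parallel
  #pragma omp for
  for (i = 0; i < 10; i++)
    {
      #pragma omp cancel for if (i == 5)
    }
}
#endif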
convolutiondepthwise_5x5_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw5x5s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); float* outptr1 = out.row(1); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); const float* r5 = img0.row(5); int i = 0; #if __aarch64__ for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _sum00 = _bias0; float32x4_t _sum01 = _bias0; float32x4_t _sum02 = _bias0; float32x4_t _sum03 = _bias0; float32x4_t _sum10 = _bias0; float32x4_t _sum11 = _bias0; float32x4_t _sum12 = _bias0; float32x4_t _sum13 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum00 = vmlaq_f32(_sum00, _k00, _r00); _sum00 = vmlaq_f32(_sum00, _k01, _r01); _sum00 = vmlaq_f32(_sum00, _k02, _r02); _sum00 = vmlaq_f32(_sum00, _k03, _r03); _sum00 = vmlaq_f32(_sum00, _k04, _r04); _sum01 = vmlaq_f32(_sum01, _k00, _r01); _sum01 = vmlaq_f32(_sum01, _k01, _r02); _sum01 = vmlaq_f32(_sum01, _k02, _r03); _sum01 = vmlaq_f32(_sum01, _k03, _r04); _sum01 = vmlaq_f32(_sum01, _k04, _r05); _sum02 = vmlaq_f32(_sum02, _k00, _r02); _sum02 = vmlaq_f32(_sum02, _k01, _r03); _sum02 = vmlaq_f32(_sum02, _k02, _r04); _sum02 = vmlaq_f32(_sum02, _k03, _r05); _sum02 = vmlaq_f32(_sum02, _k04, _r06); _sum03 = vmlaq_f32(_sum03, _k00, _r03); _sum03 = vmlaq_f32(_sum03, _k01, _r04); _sum03 = vmlaq_f32(_sum03, _k02, _r05); _sum03 = vmlaq_f32(_sum03, _k03, _r06); _sum03 = vmlaq_f32(_sum03, _k04, _r07); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _r17 = vld1q_f32(r1 + 28); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); 
float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k00, _r10); _sum10 = vmlaq_f32(_sum10, _k01, _r11); _sum10 = vmlaq_f32(_sum10, _k02, _r12); _sum10 = vmlaq_f32(_sum10, _k03, _r13); _sum10 = vmlaq_f32(_sum10, _k04, _r14); _sum11 = vmlaq_f32(_sum11, _k00, _r11); _sum11 = vmlaq_f32(_sum11, _k01, _r12); _sum11 = vmlaq_f32(_sum11, _k02, _r13); _sum11 = vmlaq_f32(_sum11, _k03, _r14); _sum11 = vmlaq_f32(_sum11, _k04, _r15); _sum12 = vmlaq_f32(_sum12, _k00, _r12); _sum12 = vmlaq_f32(_sum12, _k01, _r13); _sum12 = vmlaq_f32(_sum12, _k02, _r14); _sum12 = vmlaq_f32(_sum12, _k03, _r15); _sum12 = vmlaq_f32(_sum12, _k04, _r16); _sum13 = vmlaq_f32(_sum13, _k00, _r13); _sum13 = vmlaq_f32(_sum13, _k01, _r14); _sum13 = vmlaq_f32(_sum13, _k02, _r15); _sum13 = vmlaq_f32(_sum13, _k03, _r16); _sum13 = vmlaq_f32(_sum13, _k04, _r17); _sum00 = vmlaq_f32(_sum00, _k10, _r10); _sum00 = vmlaq_f32(_sum00, _k11, _r11); _sum00 = vmlaq_f32(_sum00, _k12, _r12); _sum00 = vmlaq_f32(_sum00, _k13, _r13); _sum00 = vmlaq_f32(_sum00, _k14, _r14); _sum01 = vmlaq_f32(_sum01, _k10, _r11); _sum01 = vmlaq_f32(_sum01, _k11, _r12); _sum01 = vmlaq_f32(_sum01, _k12, _r13); _sum01 = vmlaq_f32(_sum01, _k13, _r14); _sum01 = vmlaq_f32(_sum01, _k14, _r15); _sum02 = vmlaq_f32(_sum02, _k10, _r12); _sum02 = vmlaq_f32(_sum02, _k11, _r13); _sum02 = vmlaq_f32(_sum02, _k12, _r14); _sum02 = vmlaq_f32(_sum02, _k13, _r15); _sum02 = vmlaq_f32(_sum02, _k14, _r16); _sum03 = vmlaq_f32(_sum03, _k10, _r13); _sum03 = vmlaq_f32(_sum03, _k11, _r14); _sum03 = vmlaq_f32(_sum03, _k12, _r15); _sum03 = vmlaq_f32(_sum03, _k13, _r16); _sum03 = vmlaq_f32(_sum03, _k14, _r17); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _r27 = vld1q_f32(r2 + 28); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k10, _r20); _sum10 = vmlaq_f32(_sum10, _k11, _r21); _sum10 = vmlaq_f32(_sum10, _k12, _r22); _sum10 = vmlaq_f32(_sum10, _k13, _r23); _sum10 = vmlaq_f32(_sum10, _k14, _r24); _sum11 = vmlaq_f32(_sum11, _k10, _r21); _sum11 = vmlaq_f32(_sum11, _k11, _r22); _sum11 = vmlaq_f32(_sum11, _k12, _r23); _sum11 = vmlaq_f32(_sum11, _k13, _r24); _sum11 = vmlaq_f32(_sum11, _k14, _r25); _sum12 = vmlaq_f32(_sum12, _k10, _r22); _sum12 = vmlaq_f32(_sum12, _k11, _r23); _sum12 = vmlaq_f32(_sum12, _k12, _r24); _sum12 = vmlaq_f32(_sum12, _k13, _r25); _sum12 = vmlaq_f32(_sum12, _k14, _r26); _sum13 = vmlaq_f32(_sum13, _k10, _r23); _sum13 = vmlaq_f32(_sum13, _k11, _r24); _sum13 = vmlaq_f32(_sum13, _k12, _r25); _sum13 = vmlaq_f32(_sum13, _k13, _r26); _sum13 = vmlaq_f32(_sum13, _k14, _r27); _sum00 = vmlaq_f32(_sum00, _k20, _r20); _sum00 = vmlaq_f32(_sum00, _k21, _r21); _sum00 = vmlaq_f32(_sum00, _k22, _r22); _sum00 = vmlaq_f32(_sum00, _k23, _r23); _sum00 = vmlaq_f32(_sum00, _k24, _r24); _sum01 = vmlaq_f32(_sum01, _k20, _r21); _sum01 = vmlaq_f32(_sum01, _k21, _r22); _sum01 = vmlaq_f32(_sum01, _k22, _r23); _sum01 = vmlaq_f32(_sum01, _k23, _r24); _sum01 = vmlaq_f32(_sum01, _k24, _r25); _sum02 = vmlaq_f32(_sum02, _k20, _r22); _sum02 = vmlaq_f32(_sum02, _k21, _r23); _sum02 = vmlaq_f32(_sum02, _k22, _r24); _sum02 = vmlaq_f32(_sum02, _k23, _r25); 
_sum02 = vmlaq_f32(_sum02, _k24, _r26); _sum03 = vmlaq_f32(_sum03, _k20, _r23); _sum03 = vmlaq_f32(_sum03, _k21, _r24); _sum03 = vmlaq_f32(_sum03, _k22, _r25); _sum03 = vmlaq_f32(_sum03, _k23, _r26); _sum03 = vmlaq_f32(_sum03, _k24, _r27); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _r37 = vld1q_f32(r3 + 28); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k20, _r30); _sum10 = vmlaq_f32(_sum10, _k21, _r31); _sum10 = vmlaq_f32(_sum10, _k22, _r32); _sum10 = vmlaq_f32(_sum10, _k23, _r33); _sum10 = vmlaq_f32(_sum10, _k24, _r34); _sum11 = vmlaq_f32(_sum11, _k20, _r31); _sum11 = vmlaq_f32(_sum11, _k21, _r32); _sum11 = vmlaq_f32(_sum11, _k22, _r33); _sum11 = vmlaq_f32(_sum11, _k23, _r34); _sum11 = vmlaq_f32(_sum11, _k24, _r35); _sum12 = vmlaq_f32(_sum12, _k20, _r32); _sum12 = vmlaq_f32(_sum12, _k21, _r33); _sum12 = vmlaq_f32(_sum12, _k22, _r34); _sum12 = vmlaq_f32(_sum12, _k23, _r35); _sum12 = vmlaq_f32(_sum12, _k24, _r36); _sum13 = vmlaq_f32(_sum13, _k20, _r33); _sum13 = vmlaq_f32(_sum13, _k21, _r34); _sum13 = vmlaq_f32(_sum13, _k22, _r35); _sum13 = vmlaq_f32(_sum13, _k23, _r36); _sum13 = vmlaq_f32(_sum13, _k24, _r37); _sum00 = vmlaq_f32(_sum00, _k30, _r30); _sum00 = vmlaq_f32(_sum00, _k31, _r31); _sum00 = vmlaq_f32(_sum00, _k32, _r32); _sum00 = vmlaq_f32(_sum00, _k33, _r33); _sum00 = vmlaq_f32(_sum00, _k34, _r34); _sum01 = vmlaq_f32(_sum01, _k30, _r31); _sum01 = vmlaq_f32(_sum01, _k31, _r32); _sum01 = vmlaq_f32(_sum01, _k32, _r33); _sum01 = vmlaq_f32(_sum01, _k33, _r34); _sum01 = vmlaq_f32(_sum01, _k34, _r35); _sum02 = vmlaq_f32(_sum02, _k30, _r32); _sum02 = vmlaq_f32(_sum02, _k31, _r33); _sum02 = vmlaq_f32(_sum02, _k32, _r34); _sum02 = vmlaq_f32(_sum02, _k33, _r35); _sum02 = vmlaq_f32(_sum02, _k34, _r36); _sum03 = vmlaq_f32(_sum03, _k30, _r33); _sum03 = vmlaq_f32(_sum03, _k31, _r34); _sum03 = vmlaq_f32(_sum03, _k32, _r35); _sum03 = vmlaq_f32(_sum03, _k33, _r36); _sum03 = vmlaq_f32(_sum03, _k34, _r37); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _r47 = vld1q_f32(r4 + 28); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum10 = vmlaq_f32(_sum10, _k30, _r40); _sum10 = vmlaq_f32(_sum10, _k31, _r41); _sum10 = vmlaq_f32(_sum10, _k32, _r42); _sum10 = vmlaq_f32(_sum10, _k33, _r43); _sum10 = vmlaq_f32(_sum10, _k34, _r44); _sum11 = vmlaq_f32(_sum11, _k30, _r41); _sum11 = vmlaq_f32(_sum11, _k31, _r42); _sum11 = vmlaq_f32(_sum11, _k32, _r43); _sum11 = vmlaq_f32(_sum11, _k33, _r44); _sum11 = vmlaq_f32(_sum11, _k34, _r45); _sum12 = vmlaq_f32(_sum12, _k30, _r42); _sum12 = vmlaq_f32(_sum12, _k31, _r43); _sum12 = vmlaq_f32(_sum12, _k32, _r44); _sum12 = vmlaq_f32(_sum12, _k33, _r45); _sum12 = vmlaq_f32(_sum12, _k34, _r46); _sum13 = vmlaq_f32(_sum13, _k30, _r43); _sum13 = vmlaq_f32(_sum13, _k31, _r44); _sum13 = vmlaq_f32(_sum13, 
_k32, _r45); _sum13 = vmlaq_f32(_sum13, _k33, _r46); _sum13 = vmlaq_f32(_sum13, _k34, _r47); _sum00 = vmlaq_f32(_sum00, _k40, _r40); _sum00 = vmlaq_f32(_sum00, _k41, _r41); _sum00 = vmlaq_f32(_sum00, _k42, _r42); _sum00 = vmlaq_f32(_sum00, _k43, _r43); _sum00 = vmlaq_f32(_sum00, _k44, _r44); _sum01 = vmlaq_f32(_sum01, _k40, _r41); _sum01 = vmlaq_f32(_sum01, _k41, _r42); _sum01 = vmlaq_f32(_sum01, _k42, _r43); _sum01 = vmlaq_f32(_sum01, _k43, _r44); _sum01 = vmlaq_f32(_sum01, _k44, _r45); _sum02 = vmlaq_f32(_sum02, _k40, _r42); _sum02 = vmlaq_f32(_sum02, _k41, _r43); _sum02 = vmlaq_f32(_sum02, _k42, _r44); _sum02 = vmlaq_f32(_sum02, _k43, _r45); _sum02 = vmlaq_f32(_sum02, _k44, _r46); _sum03 = vmlaq_f32(_sum03, _k40, _r43); _sum03 = vmlaq_f32(_sum03, _k41, _r44); _sum03 = vmlaq_f32(_sum03, _k42, _r45); _sum03 = vmlaq_f32(_sum03, _k43, _r46); _sum03 = vmlaq_f32(_sum03, _k44, _r47); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r51 = vld1q_f32(r5 + 4); float32x4_t _r52 = vld1q_f32(r5 + 8); float32x4_t _r53 = vld1q_f32(r5 + 12); float32x4_t _r54 = vld1q_f32(r5 + 16); float32x4_t _r55 = vld1q_f32(r5 + 20); float32x4_t _r56 = vld1q_f32(r5 + 24); float32x4_t _r57 = vld1q_f32(r5 + 28); _sum10 = vmlaq_f32(_sum10, _k40, _r50); _sum10 = vmlaq_f32(_sum10, _k41, _r51); _sum10 = vmlaq_f32(_sum10, _k42, _r52); _sum10 = vmlaq_f32(_sum10, _k43, _r53); _sum10 = vmlaq_f32(_sum10, _k44, _r54); _sum11 = vmlaq_f32(_sum11, _k40, _r51); _sum11 = vmlaq_f32(_sum11, _k41, _r52); _sum11 = vmlaq_f32(_sum11, _k42, _r53); _sum11 = vmlaq_f32(_sum11, _k43, _r54); _sum11 = vmlaq_f32(_sum11, _k44, _r55); _sum12 = vmlaq_f32(_sum12, _k40, _r52); _sum12 = vmlaq_f32(_sum12, _k41, _r53); _sum12 = vmlaq_f32(_sum12, _k42, _r54); _sum12 = vmlaq_f32(_sum12, _k43, _r55); _sum12 = vmlaq_f32(_sum12, _k44, _r56); _sum13 = vmlaq_f32(_sum13, _k40, _r53); _sum13 = vmlaq_f32(_sum13, _k41, _r54); _sum13 = vmlaq_f32(_sum13, _k42, _r55); _sum13 = vmlaq_f32(_sum13, _k43, _r56); _sum13 = vmlaq_f32(_sum13, _k44, _r57); vst1q_f32(outptr0, _sum00); vst1q_f32(outptr0 + 4, _sum01); vst1q_f32(outptr0 + 8, _sum02); vst1q_f32(outptr0 + 12, _sum03); vst1q_f32(outptr1, _sum10); vst1q_f32(outptr1 + 4, _sum11); vst1q_f32(outptr1 + 8, _sum12); vst1q_f32(outptr1 + 12, _sum13); r0 += 16; r1 += 16; r2 += 16; r3 += 16; r4 += 16; r5 += 16; outptr0 += 16; outptr1 += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _sum00 = _bias0; float32x4_t _sum01 = _bias0; float32x4_t _sum10 = _bias0; float32x4_t _sum11 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum00 = vmlaq_f32(_sum00, _k00, _r00); _sum00 = vmlaq_f32(_sum00, _k01, _r01); _sum00 = vmlaq_f32(_sum00, _k02, _r02); _sum00 = vmlaq_f32(_sum00, _k03, _r03); _sum00 = vmlaq_f32(_sum00, _k04, _r04); _sum01 = vmlaq_f32(_sum01, _k00, _r01); _sum01 = vmlaq_f32(_sum01, _k01, _r02); _sum01 = vmlaq_f32(_sum01, _k02, _r03); _sum01 = vmlaq_f32(_sum01, _k03, _r04); _sum01 = vmlaq_f32(_sum01, _k04, _r05); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t 
_k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k00, _r10); _sum10 = vmlaq_f32(_sum10, _k01, _r11); _sum10 = vmlaq_f32(_sum10, _k02, _r12); _sum10 = vmlaq_f32(_sum10, _k03, _r13); _sum10 = vmlaq_f32(_sum10, _k04, _r14); _sum11 = vmlaq_f32(_sum11, _k00, _r11); _sum11 = vmlaq_f32(_sum11, _k01, _r12); _sum11 = vmlaq_f32(_sum11, _k02, _r13); _sum11 = vmlaq_f32(_sum11, _k03, _r14); _sum11 = vmlaq_f32(_sum11, _k04, _r15); _sum00 = vmlaq_f32(_sum00, _k10, _r10); _sum00 = vmlaq_f32(_sum00, _k11, _r11); _sum00 = vmlaq_f32(_sum00, _k12, _r12); _sum00 = vmlaq_f32(_sum00, _k13, _r13); _sum00 = vmlaq_f32(_sum00, _k14, _r14); _sum01 = vmlaq_f32(_sum01, _k10, _r11); _sum01 = vmlaq_f32(_sum01, _k11, _r12); _sum01 = vmlaq_f32(_sum01, _k12, _r13); _sum01 = vmlaq_f32(_sum01, _k13, _r14); _sum01 = vmlaq_f32(_sum01, _k14, _r15); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k10, _r20); _sum10 = vmlaq_f32(_sum10, _k11, _r21); _sum10 = vmlaq_f32(_sum10, _k12, _r22); _sum10 = vmlaq_f32(_sum10, _k13, _r23); _sum10 = vmlaq_f32(_sum10, _k14, _r24); _sum11 = vmlaq_f32(_sum11, _k10, _r21); _sum11 = vmlaq_f32(_sum11, _k11, _r22); _sum11 = vmlaq_f32(_sum11, _k12, _r23); _sum11 = vmlaq_f32(_sum11, _k13, _r24); _sum11 = vmlaq_f32(_sum11, _k14, _r25); _sum00 = vmlaq_f32(_sum00, _k20, _r20); _sum00 = vmlaq_f32(_sum00, _k21, _r21); _sum00 = vmlaq_f32(_sum00, _k22, _r22); _sum00 = vmlaq_f32(_sum00, _k23, _r23); _sum00 = vmlaq_f32(_sum00, _k24, _r24); _sum01 = vmlaq_f32(_sum01, _k20, _r21); _sum01 = vmlaq_f32(_sum01, _k21, _r22); _sum01 = vmlaq_f32(_sum01, _k22, _r23); _sum01 = vmlaq_f32(_sum01, _k23, _r24); _sum01 = vmlaq_f32(_sum01, _k24, _r25); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k20, _r30); _sum10 = vmlaq_f32(_sum10, _k21, _r31); _sum10 = vmlaq_f32(_sum10, _k22, _r32); _sum10 = vmlaq_f32(_sum10, _k23, _r33); _sum10 = vmlaq_f32(_sum10, _k24, _r34); _sum11 = vmlaq_f32(_sum11, _k20, _r31); _sum11 = vmlaq_f32(_sum11, _k21, _r32); _sum11 = vmlaq_f32(_sum11, _k22, _r33); _sum11 = vmlaq_f32(_sum11, _k23, _r34); _sum11 = vmlaq_f32(_sum11, _k24, _r35); _sum00 = vmlaq_f32(_sum00, _k30, _r30); _sum00 = vmlaq_f32(_sum00, _k31, _r31); _sum00 = vmlaq_f32(_sum00, _k32, _r32); _sum00 = vmlaq_f32(_sum00, _k33, _r33); _sum00 = vmlaq_f32(_sum00, _k34, _r34); _sum01 = vmlaq_f32(_sum01, _k30, _r31); _sum01 = vmlaq_f32(_sum01, _k31, _r32); _sum01 = vmlaq_f32(_sum01, _k32, _r33); _sum01 = vmlaq_f32(_sum01, _k33, _r34); _sum01 = vmlaq_f32(_sum01, _k34, _r35); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); 
float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum10 = vmlaq_f32(_sum10, _k30, _r40); _sum10 = vmlaq_f32(_sum10, _k31, _r41); _sum10 = vmlaq_f32(_sum10, _k32, _r42); _sum10 = vmlaq_f32(_sum10, _k33, _r43); _sum10 = vmlaq_f32(_sum10, _k34, _r44); _sum11 = vmlaq_f32(_sum11, _k30, _r41); _sum11 = vmlaq_f32(_sum11, _k31, _r42); _sum11 = vmlaq_f32(_sum11, _k32, _r43); _sum11 = vmlaq_f32(_sum11, _k33, _r44); _sum11 = vmlaq_f32(_sum11, _k34, _r45); _sum00 = vmlaq_f32(_sum00, _k40, _r40); _sum00 = vmlaq_f32(_sum00, _k41, _r41); _sum00 = vmlaq_f32(_sum00, _k42, _r42); _sum00 = vmlaq_f32(_sum00, _k43, _r43); _sum00 = vmlaq_f32(_sum00, _k44, _r44); _sum01 = vmlaq_f32(_sum01, _k40, _r41); _sum01 = vmlaq_f32(_sum01, _k41, _r42); _sum01 = vmlaq_f32(_sum01, _k42, _r43); _sum01 = vmlaq_f32(_sum01, _k43, _r44); _sum01 = vmlaq_f32(_sum01, _k44, _r45); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r51 = vld1q_f32(r5 + 4); float32x4_t _r52 = vld1q_f32(r5 + 8); float32x4_t _r53 = vld1q_f32(r5 + 12); float32x4_t _r54 = vld1q_f32(r5 + 16); float32x4_t _r55 = vld1q_f32(r5 + 20); _sum10 = vmlaq_f32(_sum10, _k40, _r50); _sum10 = vmlaq_f32(_sum10, _k41, _r51); _sum10 = vmlaq_f32(_sum10, _k42, _r52); _sum10 = vmlaq_f32(_sum10, _k43, _r53); _sum10 = vmlaq_f32(_sum10, _k44, _r54); _sum11 = vmlaq_f32(_sum11, _k40, _r51); _sum11 = vmlaq_f32(_sum11, _k41, _r52); _sum11 = vmlaq_f32(_sum11, _k42, _r53); _sum11 = vmlaq_f32(_sum11, _k43, _r54); _sum11 = vmlaq_f32(_sum11, _k44, _r55); vst1q_f32(outptr0, _sum00); vst1q_f32(outptr0 + 4, _sum01); vst1q_f32(outptr1, _sum10); vst1q_f32(outptr1 + 4, _sum11); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; r5 += 8; outptr0 += 8; outptr1 += 8; } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum1 = vmlaq_f32(_sum1, _k00, _r10); _sum1 = vmlaq_f32(_sum1, _k01, _r11); _sum1 = vmlaq_f32(_sum1, _k02, _r12); _sum1 = vmlaq_f32(_sum1, _k03, _r13); _sum1 = vmlaq_f32(_sum1, _k04, _r14); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 
16); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum1 = vmlaq_f32(_sum1, _k10, _r20); _sum1 = vmlaq_f32(_sum1, _k11, _r21); _sum1 = vmlaq_f32(_sum1, _k12, _r22); _sum1 = vmlaq_f32(_sum1, _k13, _r23); _sum1 = vmlaq_f32(_sum1, _k14, _r24); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum1 = vmlaq_f32(_sum1, _k20, _r30); _sum1 = vmlaq_f32(_sum1, _k21, _r31); _sum1 = vmlaq_f32(_sum1, _k22, _r32); _sum1 = vmlaq_f32(_sum1, _k23, _r33); _sum1 = vmlaq_f32(_sum1, _k24, _r34); _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum1 = vmlaq_f32(_sum1, _k30, _r40); _sum1 = vmlaq_f32(_sum1, _k31, _r41); _sum1 = vmlaq_f32(_sum1, _k32, _r42); _sum1 = vmlaq_f32(_sum1, _k33, _r43); _sum1 = vmlaq_f32(_sum1, _k34, _r44); _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r51 = vld1q_f32(r5 + 4); float32x4_t _r52 = vld1q_f32(r5 + 8); float32x4_t _r53 = vld1q_f32(r5 + 12); float32x4_t _r54 = vld1q_f32(r5 + 16); _sum1 = vmlaq_f32(_sum1, _k40, _r50); _sum1 = vmlaq_f32(_sum1, _k41, _r51); _sum1 = vmlaq_f32(_sum1, _k42, _r52); _sum1 = vmlaq_f32(_sum1, _k43, _r53); _sum1 = vmlaq_f32(_sum1, _k44, _r54); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; outptr0 += 4; outptr1 += 4; } r0 += 4 * 4 + w * 4; r1 += 4 * 4 + w * 4; r2 += 4 * 4 + w * 4; r3 += 4 * 4 + w * 4; r4 += 4 * 4 + w * 4; r5 += 4 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } #endif // __aarch64__ for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _sum2 = _bias0; float32x4_t _sum3 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = 
vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r01); _sum1 = vmlaq_f32(_sum1, _k01, _r02); _sum1 = vmlaq_f32(_sum1, _k02, _r03); _sum1 = vmlaq_f32(_sum1, _k03, _r04); _sum1 = vmlaq_f32(_sum1, _k04, _r05); _sum2 = vmlaq_f32(_sum2, _k00, _r02); _sum2 = vmlaq_f32(_sum2, _k01, _r03); _sum2 = vmlaq_f32(_sum2, _k02, _r04); _sum2 = vmlaq_f32(_sum2, _k03, _r05); _sum2 = vmlaq_f32(_sum2, _k04, _r06); _sum3 = vmlaq_f32(_sum3, _k00, _r03); _sum3 = vmlaq_f32(_sum3, _k01, _r04); _sum3 = vmlaq_f32(_sum3, _k02, _r05); _sum3 = vmlaq_f32(_sum3, _k03, _r06); _sum3 = vmlaq_f32(_sum3, _k04, _r07); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _r17 = vld1q_f32(r1 + 28); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r11); _sum1 = vmlaq_f32(_sum1, _k11, _r12); _sum1 = vmlaq_f32(_sum1, _k12, _r13); _sum1 = vmlaq_f32(_sum1, _k13, _r14); _sum1 = vmlaq_f32(_sum1, _k14, _r15); _sum2 = vmlaq_f32(_sum2, _k10, _r12); _sum2 = vmlaq_f32(_sum2, _k11, _r13); _sum2 = vmlaq_f32(_sum2, _k12, _r14); _sum2 = vmlaq_f32(_sum2, _k13, _r15); _sum2 = vmlaq_f32(_sum2, _k14, _r16); _sum3 = vmlaq_f32(_sum3, _k10, _r13); _sum3 = vmlaq_f32(_sum3, _k11, _r14); _sum3 = vmlaq_f32(_sum3, _k12, _r15); _sum3 = vmlaq_f32(_sum3, _k13, _r16); _sum3 = vmlaq_f32(_sum3, _k14, _r17); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _r27 = vld1q_f32(r2 + 28); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r21); _sum1 = vmlaq_f32(_sum1, _k21, _r22); _sum1 = vmlaq_f32(_sum1, _k22, _r23); _sum1 = vmlaq_f32(_sum1, _k23, _r24); _sum1 = vmlaq_f32(_sum1, _k24, _r25); _sum2 = vmlaq_f32(_sum2, _k20, _r22); _sum2 = vmlaq_f32(_sum2, _k21, _r23); _sum2 = vmlaq_f32(_sum2, _k22, _r24); _sum2 = vmlaq_f32(_sum2, _k23, _r25); _sum2 = vmlaq_f32(_sum2, _k24, _r26); _sum3 = vmlaq_f32(_sum3, _k20, _r23); _sum3 = vmlaq_f32(_sum3, _k21, _r24); _sum3 = vmlaq_f32(_sum3, _k22, _r25); _sum3 = vmlaq_f32(_sum3, _k23, _r26); _sum3 = vmlaq_f32(_sum3, _k24, _r27); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _r37 = vld1q_f32(r3 + 28); 
float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r31); _sum1 = vmlaq_f32(_sum1, _k31, _r32); _sum1 = vmlaq_f32(_sum1, _k32, _r33); _sum1 = vmlaq_f32(_sum1, _k33, _r34); _sum1 = vmlaq_f32(_sum1, _k34, _r35); _sum2 = vmlaq_f32(_sum2, _k30, _r32); _sum2 = vmlaq_f32(_sum2, _k31, _r33); _sum2 = vmlaq_f32(_sum2, _k32, _r34); _sum2 = vmlaq_f32(_sum2, _k33, _r35); _sum2 = vmlaq_f32(_sum2, _k34, _r36); _sum3 = vmlaq_f32(_sum3, _k30, _r33); _sum3 = vmlaq_f32(_sum3, _k31, _r34); _sum3 = vmlaq_f32(_sum3, _k32, _r35); _sum3 = vmlaq_f32(_sum3, _k33, _r36); _sum3 = vmlaq_f32(_sum3, _k34, _r37); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _r47 = vld1q_f32(r4 + 28); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r41); _sum1 = vmlaq_f32(_sum1, _k41, _r42); _sum1 = vmlaq_f32(_sum1, _k42, _r43); _sum1 = vmlaq_f32(_sum1, _k43, _r44); _sum1 = vmlaq_f32(_sum1, _k44, _r45); _sum2 = vmlaq_f32(_sum2, _k40, _r42); _sum2 = vmlaq_f32(_sum2, _k41, _r43); _sum2 = vmlaq_f32(_sum2, _k42, _r44); _sum2 = vmlaq_f32(_sum2, _k43, _r45); _sum2 = vmlaq_f32(_sum2, _k44, _r46); _sum3 = vmlaq_f32(_sum3, _k40, _r43); _sum3 = vmlaq_f32(_sum3, _k41, _r44); _sum3 = vmlaq_f32(_sum3, _k42, _r45); _sum3 = vmlaq_f32(_sum3, _k43, _r46); _sum3 = vmlaq_f32(_sum3, _k44, _r47); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); vst1q_f32(outptr0 + 8, _sum2); vst1q_f32(outptr0 + 12, _sum3); r0 += 16; r1 += 16; r2 += 16; r3 += 16; r4 += 16; outptr0 += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r01); _sum1 = vmlaq_f32(_sum1, _k01, _r02); _sum1 = vmlaq_f32(_sum1, _k02, _r03); _sum1 = vmlaq_f32(_sum1, _k03, _r04); _sum1 = vmlaq_f32(_sum1, _k04, _r05); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _k10 = 
vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r11); _sum1 = vmlaq_f32(_sum1, _k11, _r12); _sum1 = vmlaq_f32(_sum1, _k12, _r13); _sum1 = vmlaq_f32(_sum1, _k13, _r14); _sum1 = vmlaq_f32(_sum1, _k14, _r15); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r21); _sum1 = vmlaq_f32(_sum1, _k21, _r22); _sum1 = vmlaq_f32(_sum1, _k22, _r23); _sum1 = vmlaq_f32(_sum1, _k23, _r24); _sum1 = vmlaq_f32(_sum1, _k24, _r25); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r31); _sum1 = vmlaq_f32(_sum1, _k31, _r32); _sum1 = vmlaq_f32(_sum1, _k32, _r33); _sum1 = vmlaq_f32(_sum1, _k33, _r34); _sum1 = vmlaq_f32(_sum1, _k34, _r35); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r41); _sum1 = vmlaq_f32(_sum1, _k41, _r42); _sum1 = vmlaq_f32(_sum1, _k42, _r43); _sum1 = vmlaq_f32(_sum1, _k43, _r44); _sum1 = vmlaq_f32(_sum1, _k44, _r45); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; outptr0 += 8; } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = 
vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); vst1q_f32(outptr0, _sum0); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; outptr0 += 4; } r0 += 4 * 4; r1 += 4 * 4; r2 += 4 * 4; r3 += 4 * 4; r4 += 4 * 4; } } } static void convdw5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _sum2 = _bias0; float32x4_t _sum3 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _r08 = vld1q_f32(r0 + 32); float32x4_t _r09 = vld1q_f32(r0 + 36); float32x4_t _r010 = vld1q_f32(r0 + 40); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r02); _sum1 = vmlaq_f32(_sum1, _k01, _r03); _sum1 = vmlaq_f32(_sum1, _k02, _r04); _sum1 = vmlaq_f32(_sum1, _k03, _r05); _sum1 = vmlaq_f32(_sum1, _k04, _r06); _sum2 = vmlaq_f32(_sum2, _k00, _r04); _sum2 = vmlaq_f32(_sum2, _k01, _r05); _sum2 = vmlaq_f32(_sum2, _k02, _r06); _sum2 = vmlaq_f32(_sum2, _k03, _r07); _sum2 = vmlaq_f32(_sum2, _k04, _r08); _sum3 = vmlaq_f32(_sum3, _k00, _r06); _sum3 = vmlaq_f32(_sum3, _k01, _r07); _sum3 = vmlaq_f32(_sum3, _k02, _r08); _sum3 = vmlaq_f32(_sum3, _k03, _r09); _sum3 = vmlaq_f32(_sum3, _k04, _r010); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _r17 = vld1q_f32(r1 + 28); float32x4_t _r18 = vld1q_f32(r1 + 32); float32x4_t _r19 = vld1q_f32(r1 + 36); float32x4_t _r110 = vld1q_f32(r1 + 40); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r12); _sum1 = vmlaq_f32(_sum1, _k11, _r13); _sum1 = vmlaq_f32(_sum1, _k12, _r14); _sum1 = vmlaq_f32(_sum1, _k13, _r15); _sum1 = vmlaq_f32(_sum1, _k14, _r16); _sum2 = vmlaq_f32(_sum2, _k10, _r14); _sum2 = vmlaq_f32(_sum2, _k11, _r15); _sum2 = vmlaq_f32(_sum2, _k12, _r16); _sum2 = vmlaq_f32(_sum2, _k13, _r17); _sum2 = vmlaq_f32(_sum2, _k14, _r18); _sum3 = vmlaq_f32(_sum3, _k10, _r16); _sum3 = vmlaq_f32(_sum3, _k11, _r17); _sum3 = vmlaq_f32(_sum3, _k12, _r18); _sum3 = vmlaq_f32(_sum3, _k13, _r19); _sum3 = vmlaq_f32(_sum3, _k14, _r110); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _r27 = 
vld1q_f32(r2 + 28); float32x4_t _r28 = vld1q_f32(r2 + 32); float32x4_t _r29 = vld1q_f32(r2 + 36); float32x4_t _r210 = vld1q_f32(r2 + 40); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r22); _sum1 = vmlaq_f32(_sum1, _k21, _r23); _sum1 = vmlaq_f32(_sum1, _k22, _r24); _sum1 = vmlaq_f32(_sum1, _k23, _r25); _sum1 = vmlaq_f32(_sum1, _k24, _r26); _sum2 = vmlaq_f32(_sum2, _k20, _r24); _sum2 = vmlaq_f32(_sum2, _k21, _r25); _sum2 = vmlaq_f32(_sum2, _k22, _r26); _sum2 = vmlaq_f32(_sum2, _k23, _r27); _sum2 = vmlaq_f32(_sum2, _k24, _r28); _sum3 = vmlaq_f32(_sum3, _k20, _r26); _sum3 = vmlaq_f32(_sum3, _k21, _r27); _sum3 = vmlaq_f32(_sum3, _k22, _r28); _sum3 = vmlaq_f32(_sum3, _k23, _r29); _sum3 = vmlaq_f32(_sum3, _k24, _r210); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _r37 = vld1q_f32(r3 + 28); float32x4_t _r38 = vld1q_f32(r3 + 32); float32x4_t _r39 = vld1q_f32(r3 + 36); float32x4_t _r310 = vld1q_f32(r3 + 40); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r32); _sum1 = vmlaq_f32(_sum1, _k31, _r33); _sum1 = vmlaq_f32(_sum1, _k32, _r34); _sum1 = vmlaq_f32(_sum1, _k33, _r35); _sum1 = vmlaq_f32(_sum1, _k34, _r36); _sum2 = vmlaq_f32(_sum2, _k30, _r34); _sum2 = vmlaq_f32(_sum2, _k31, _r35); _sum2 = vmlaq_f32(_sum2, _k32, _r36); _sum2 = vmlaq_f32(_sum2, _k33, _r37); _sum2 = vmlaq_f32(_sum2, _k34, _r38); _sum3 = vmlaq_f32(_sum3, _k30, _r36); _sum3 = vmlaq_f32(_sum3, _k31, _r37); _sum3 = vmlaq_f32(_sum3, _k32, _r38); _sum3 = vmlaq_f32(_sum3, _k33, _r39); _sum3 = vmlaq_f32(_sum3, _k34, _r310); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _r47 = vld1q_f32(r4 + 28); float32x4_t _r48 = vld1q_f32(r4 + 32); float32x4_t _r49 = vld1q_f32(r4 + 36); float32x4_t _r410 = vld1q_f32(r4 + 40); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r42); _sum1 = vmlaq_f32(_sum1, _k41, _r43); _sum1 = vmlaq_f32(_sum1, _k42, _r44); _sum1 = vmlaq_f32(_sum1, _k43, _r45); _sum1 = vmlaq_f32(_sum1, _k44, _r46); _sum2 = vmlaq_f32(_sum2, _k40, _r44); _sum2 = vmlaq_f32(_sum2, _k41, 
_r45); _sum2 = vmlaq_f32(_sum2, _k42, _r46); _sum2 = vmlaq_f32(_sum2, _k43, _r47); _sum2 = vmlaq_f32(_sum2, _k44, _r48); _sum3 = vmlaq_f32(_sum3, _k40, _r46); _sum3 = vmlaq_f32(_sum3, _k41, _r47); _sum3 = vmlaq_f32(_sum3, _k42, _r48); _sum3 = vmlaq_f32(_sum3, _k43, _r49); _sum3 = vmlaq_f32(_sum3, _k44, _r410); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); vst1q_f32(outptr0 + 8, _sum2); vst1q_f32(outptr0 + 12, _sum3); r0 += 8 * 4; r1 += 8 * 4; r2 += 8 * 4; r3 += 8 * 4; r4 += 8 * 4; outptr0 += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r02); _sum1 = vmlaq_f32(_sum1, _k01, _r03); _sum1 = vmlaq_f32(_sum1, _k02, _r04); _sum1 = vmlaq_f32(_sum1, _k03, _r05); _sum1 = vmlaq_f32(_sum1, _k04, _r06); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r12); _sum1 = vmlaq_f32(_sum1, _k11, _r13); _sum1 = vmlaq_f32(_sum1, _k12, _r14); _sum1 = vmlaq_f32(_sum1, _k13, _r15); _sum1 = vmlaq_f32(_sum1, _k14, _r16); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r22); _sum1 = vmlaq_f32(_sum1, _k21, _r23); _sum1 = vmlaq_f32(_sum1, _k22, _r24); _sum1 = vmlaq_f32(_sum1, _k23, _r25); _sum1 = vmlaq_f32(_sum1, _k24, _r26); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); 
float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r32); _sum1 = vmlaq_f32(_sum1, _k31, _r33); _sum1 = vmlaq_f32(_sum1, _k32, _r34); _sum1 = vmlaq_f32(_sum1, _k33, _r35); _sum1 = vmlaq_f32(_sum1, _k34, _r36); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r42); _sum1 = vmlaq_f32(_sum1, _k41, _r43); _sum1 = vmlaq_f32(_sum1, _k42, _r44); _sum1 = vmlaq_f32(_sum1, _k43, _r45); _sum1 = vmlaq_f32(_sum1, _k44, _r46); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); r0 += 4 * 4; r1 += 4 * 4; r2 += 4 * 4; r3 += 4 * 4; r4 += 4 * 4; outptr0 += 8; } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = 
vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); vst1q_f32(outptr0, _sum0); r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; r3 += 2 * 4; r4 += 2 * 4; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
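// ---------------------------------------------------------------------------
// Reference note (added for clarity, not part of ncnn): the two kernels above
// are hand-unrolled NEON versions of the same pack4 depthwise 5x5
// convolution; convdw5x5s1_pack4_neon handles stride 1 and
// convdw5x5s2_pack4_neon handles stride 2. The sketch below is a hypothetical
// scalar equivalent under these layout assumptions: each pixel is 4
// consecutive floats (one per packed channel), the input already carries the
// 5x5 padding border (as bottom_blob does here), and each group's kernel is
// 25 pack4 taps -- the five 20-float rows that k0 walks before the
// "k0 -= 80" rewind. Names such as convdw5x5_pack4_ref and kernel100 are
// invented for this example.
static void convdw5x5_pack4_ref(const float* bottom, int w, // padded input width in pixels
                                float* top, int outw, int outh,
                                const float* kernel100, // 25 taps * 4 lanes per group
                                const float* bias4,     // 4 lanes, may be NULL
                                int stride)             // 1 or 2
{
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            for (int lane = 0; lane < 4; lane++)
            {
                float sum = bias4 ? bias4[lane] : 0.f;

                for (int ky = 0; ky < 5; ky++)
                {
                    // pack4 element at input row i*stride+ky, column j*stride
                    const float* r = bottom + ((size_t)(i * stride + ky) * w + j * stride) * 4;
                    const float* k = kernel100 + ky * 20; // one 20-float kernel row

                    for (int kx = 0; kx < 5; kx++)
                        sum += k[kx * 4 + lane] * r[kx * 4 + lane];
                }

                top[((size_t)i * outw + j) * 4 + lane] = sum;
            }
        }
    }
}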
DRB067-restrictpointer1-orig-no.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan,
and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the disclaimer (as noted below) in the
  documentation and/or other materials provided with the distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors may
  be used to endorse or promote products derived from this software without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/

/*
restrict pointers: no aliasing.
Array initialization using assignments.
C99 is needed to compile this code, e.g. gcc -std=c99 -c Stress-1.c
*/

#include <stdlib.h>

typedef double real8;

void foo(real8 * restrict newSxx, real8 * restrict newSyy, int length)
{
    int i;
#pragma omp parallel for private (i) firstprivate (length)
    for (i = 0; i <= length - 1; i += 1) {
        newSxx[i] = 0.0;
        newSyy[i] = 0.0;
    }
}

int main()
{
    int length = 1000;
    real8* newSxx = malloc(length * sizeof(real8));
    real8* newSyy = malloc(length * sizeof(real8));

    foo(newSxx, newSyy, length);

    free(newSxx);
    free(newSyy);
    return 0;
}
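/*
 * Illustration (hypothetical, not part of DataRaceBench): the restrict
 * qualifiers above are what make the parallel loop race-free by
 * construction. Without them, the compiler -- and a race checker -- must
 * allow a call such as foo_may_alias(p, p + 1, n), where the two arrays
 * overlap: iteration i's store to b[i] then hits the same element as
 * iteration i+1's store to a[i+1], a write-write race under
 * omp parallel for. foo_may_alias is an invented name for this sketch.
 */
void foo_may_alias(double* a, double* b, int length)
{
#pragma omp parallel for
    for (int i = 0; i < length; i++) {
        a[i] = 0.0; /* if b == a + 1, this element is also b[i-1] */
        b[i] = 0.0;
    }
}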
util.h
#ifndef _C_UTIL_
#define _C_UTIL_

#include <math.h>
#include <iostream>
#include <cstdlib> // rand(), RAND_MAX
#include <cstdio>  // printf()

//-------------------------------------------------------------------
//--initialize array with maximum limit
//-------------------------------------------------------------------
template<typename datatype>
void fill(datatype *A, const int n, const datatype maxi) {
    for (int j = 0; j < n; j++) {
        // uniform values in [0, maxi)
        A[j] = ((datatype) maxi * (rand() / (RAND_MAX + 1.0f)));
    }
}

//--print matrix
template<typename datatype>
void print_matrix(datatype *A, int height, int width) {
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            int idx = i * width + j;
            std::cout << A[idx] << " ";
        }
        std::cout << std::endl;
    }
    return;
}

//-------------------------------------------------------------------
//--verify results
//-------------------------------------------------------------------
#define MAX_RELATIVE_ERROR .002
template<typename datatype>
void verify_array(const datatype *cpuResults, const datatype *gpuResults, const int size) {
    // Note: every thread only ever writes 'false' to the flag, so the
    // unsynchronized update is benign in practice. cpuResults[i] is used
    // as the denominator of the relative error, so it is assumed nonzero.
    bool passed = true;
#pragma omp parallel for
    for (int i = 0; i < size; i++) {
        if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) {
            passed = false;
        }
    }
    if (passed) {
        std::cout << "--cambine:passed:-)" << std::endl;
    }
    else {
        std::cout << "--cambine: failed:-(" << std::endl;
    }
    return;
}

template<typename datatype>
void compare_results(const datatype *cpu_results, const datatype *gpu_results, const int size) {
    bool passed = true;
    //#pragma omp parallel for
    for (int i = 0; i < size; i++) {
        if (cpu_results[i] != gpu_results[i]) {
            passed = false;
        }
        // Debug print of the first 10 entries; the %d format assumes an
        // integer datatype instantiation.
        if (i < 10)
            printf("results cpu_results[%d]=%d gpu_results[%d]=%d\n", i, cpu_results[i], i, gpu_results[i]);
    }
    if (passed) {
        std::cout << "--cambine: passed: -)" << std::endl;
    }
    else {
        std::cout << "--cambine: failed :-(" << std::endl;
    }
    return;
}

#endif
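// Hypothetical usage sketch (an assumption, not part of the original
// util.h): how fill(), print_matrix() and verify_array() are meant to be
// combined by the host programs that include this header. Guarded so it
// only compiles when explicitly requested, e.g.
//   g++ -fopenmp -DUTIL_DEMO_MAIN util_demo.cpp
#ifdef UTIL_DEMO_MAIN
int main()
{
    const int n = 16;
    float cpu[16];
    float gpu[16];

    fill(cpu, n, 10.0f);          // random floats in [0, 10)
    for (int i = 0; i < n; i++)
        gpu[i] = cpu[i];          // stand-in for a device-computed result

    print_matrix(cpu, 4, 4);      // view the buffer as a 4x4 matrix
    verify_array(cpu, gpu, n);    // identical copies -> "passed"
    return 0;
}
#endif // UTIL_DEMO_MAIN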