source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_unaryop__lnot_int32_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int32_bool
// op(A') function:  GB_tran__lnot_int32_bool

// C type:   int32_t
// A type:   bool
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    bool

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (already-cast) value
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij)): one full read-cast-apply-write step
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply over the dense value arrays: Cx [p] = !(Ax [p] != 0),
// cast to int32_t.  Aliasing Cx == Ax is safe since each entry p is read
// once and written once.

GrB_Info GB_unop__lnot_int32_bool
(
    int32_t *Cx,        // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c; it consumes the
// GB_* macros defined above.  Rowcounts/Iter/A_slice describe the slicing of
// A across naslice tasks — semantics defined by the included template
// (NOTE(review): not visible from this file; see GB_unaryop_transpose.c).

GrB_Info GB_tran__lnot_int32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test-float-libmvec-sincosf-main.c
/* Test for vector sincosf ABI. Copyright (C) 2016-2017 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <math.h> #define N 1000 float x[N], s[N], c[N]; float *s_ptrs[N]; float *c_ptrs[N]; int test_sincosf_abi (void) { int i; for(i = 0; i < N; i++) { x[i] = i / 3; s_ptrs[i] = &s[i]; c_ptrs[i] = &c[i]; } #pragma omp simd for(i = 0; i < N; i++) sincosf (x[i], s_ptrs[i], c_ptrs[i]); return 0; }
FTMTree_MT_Template.h
/// \ingroup base
/// \class ttk::FTMTree_MT
/// \author Charles Gueunet <charles.gueunet@lip6.fr>
/// \date June 2016.
///
///\brief TTK processing package that efficiently computes the
/// sublevel set tree of scalar data and more
/// (data segmentation, topological simplification,
/// persistence diagrams, persistence curves, etc.).
///
///\param dataType Data type of the input scalar field (char, float,
/// etc.).
///
/// \sa ttkContourForests.cpp %for a usage example.

#ifndef FTMTREE_MT_TPL_H
#define FTMTREE_MT_TPL_H

#include <functional>

#include "FTMTree_MT.h"

// ----
// Init
// ----

namespace ttk {
namespace ftm {

   /// Build the sorted-vertex permutation and its inverse for the scalar
   /// field stored in scalars_:
   ///  - sortedVertices[i] = id of the i-th vertex in isLower order;
   ///  - mirrorVertices[v] = rank of vertex v in that order (inverse
   ///    permutation, see the final loop).
   /// Both vectors are (re)allocated lazily and reused across calls.
   template <typename scalarType, typename idType>
   void ftm::FTMTree_MT::sortInput(void)
   {
      const auto &nbVertices = scalars_->size;
      auto *sortedVect = scalars_->sortedVertices.get();

      // Lazily allocate the sort buffer the first time; otherwise reuse its
      // capacity (clear() keeps the allocation).
      if (sortedVect == nullptr) {
         sortedVect = new std::vector<SimplexId>(0);
         scalars_->sortedVertices.reset(sortedVect);
      } else {
         sortedVect->clear();
      }

      // Compare vertex ids through the scalar field (with simulation of
      // simplicity handled by isLower — presumed; verify in FTMTree_MT.h).
      auto indirect_sort = [&](const size_t &a, const size_t &b) {
         return isLower<scalarType,idType>(a, b);
      };

      sortedVect->resize(nbVertices, 0);
      std::iota(sortedVect->begin(), sortedVect->end(), 0);

// #pragma omp parallel
// #pragma omp single
//       scalars_->qsort<SimplexId>(sortedVect->data(), 0, scalars_->size -1, indirect_sort);

#ifdef TTK_ENABLE_OPENMP
# ifdef __clang__
      // GNU parallel mode is not available under clang: fall back to the
      // sequential std::sort and warn the user.
      std::cout << "Caution, outside GCC, sequential sort" << std::endl;
      std::sort(sortedVect->begin(), sortedVect->end(), indirect_sort);
# else
      __gnu_parallel::sort(sortedVect->begin(), sortedVect->end(), indirect_sort);
# endif
#else
      std::sort(sortedVect->begin(), sortedVect->end(), indirect_sort);
#endif

      // Same lazy-allocation pattern for the inverse permutation.
      auto *mirrorVert = scalars_->mirrorVertices.get();
      if (mirrorVert == nullptr) {
         mirrorVert = new std::vector<SimplexId>(0);
         scalars_->mirrorVertices.reset(mirrorVert);
      } else {
         mirrorVert->clear();
      }

      scalars_->mirrorVertices->resize(nbVertices);

      // mirror[sorted[i]] = i : invert the permutation.  Each output slot is
      // written exactly once, so the parallel loop is race-free.
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for
#endif
      for (SimplexId i = 0; i < nbVertices; i++) {
         (*scalars_->mirrorVertices)[(*sortedVect)[i]] = i;
      }
   }
}
}

// Process

#endif /* end of include guard: FTMTREE_MT_TPL_H */
GB_unop__identity_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_int8)
// op(A') function:  GB (_unop_tran__identity_uint64_int8)

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    int8_t

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting from the A type to the C type
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij): one full read-cast-write step
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    int8_t aij = Ax [pA] ;              \
    /* Cx [pC] = op (cast (aij)) */     \
    uint64_t z = (uint64_t) aij ;       \
    Cx [pC] = z ;                       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply over the dense value arrays: Cx [p] = (uint64_t) Ax [p].
// Aliasing Cx == Ax is safe since each entry p is read once then written once.
// For the bitmap case, entries with Ab [p] == 0 are skipped (not present).

GrB_Info GB (_unop_apply__identity_uint64_int8)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry in [0,anz) is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c; it consumes the
// GB_* macros defined above.  Workspaces/A_slice describe the slicing of A
// across nworkspaces/nthreads tasks — semantics defined by the included
// template (NOTE(review): not visible from this file; see GB_unop_transpose.c).

GrB_Info GB (_unop_tran__identity_uint64_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_monte_carlo_pi.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <time.h> int in_circle(float x, float y) { return (x - 0.5f) * (x - 0.5f) + (y - 0.5f) * (y - 0.5f) < 0.5f * 0.5f; } float monte_carlo_pi(size_t N) { int i, tid, nthreads; unsigned int seed; float x, y; float start, end; size_t N_circle = 0; size_t N_total_rounds = 0; start = omp_get_wtime(); #pragma omp parallel shared(N) private(i, tid, nthreads, seed, x, y) reduction(+: N_circle, N_total_rounds) { tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); seed = tid * (unsigned int) time(NULL); #pragma omp parallel for reduction(+: N_circle, N_total_rounds) for (i = 0; i < (N / nthreads + N % nthreads); ++i) { x = (float)(rand_r(&seed)) / RAND_MAX; y = (float)(rand_r(&seed)) / RAND_MAX; if (in_circle(x, y)) { N_circle++; } N_total_rounds++; // printf("tid = %d i = %d\n", tid, i); } } end = omp_get_wtime(); printf("N = %zu round of Monte-Carlo, elapsed time = %f ms\n", N, (end - start) * 1000); return ((float)(N_circle) / N) / (0.5f * 0.5f); } int main() { float pi; const size_t N = 100000000; pi = monte_carlo_pi(N); printf("Calculated PI = %f\n", pi); printf("True PI = %f\n", M_PI); printf("Diff = %f\n", fabs(pi - M_PI)); return 0; }
isx_original.c
/* Copyright (c) 2015, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define _POSIX_C_SOURCE 199309L #include <shmem.h> #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <math.h> #include <string.h> #include <unistd.h> // sleep() #include <sys/stat.h> #include <stdint.h> #include "params.h" #include "isx_original.h" #include "timer.h" #include "pcg_basic.h" #if defined(_OPENMP) #include <omp.h> #endif #define ROOT_PE 0 // Needed for shmem collective operations int pWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; double dWrk[_SHMEM_REDUCE_SYNC_SIZE]; long long int llWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; long pSync[_SHMEM_REDUCE_SYNC_SIZE]; uint64_t NUM_PES; // Number of parallel workers uint64_t TOTAL_KEYS; // Total number of keys across all PEs uint64_t NUM_KEYS_PER_PE; // Number of keys generated on each PE uint64_t NUM_BUCKETS; // The number of buckets in the bucket sort uint64_t BUCKET_WIDTH; // The size of each bucket uint64_t MAX_KEY_VAL; // The maximum possible generated key value volatile int whose_turn; long long int receive_offset = 0; long long int my_bucket_size = 0; #include <sys/time.h> #include <time.h> static unsigned long long current_time_ns() { struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; } #define PARALLEL_FOR_MODE SHMEM_PARALLEL_FOR_RECURSIVE_MODE #define CHUNKS_COUNT_LOCAL_KEYS (actual_num_workers) #define CHUNKS_MAKE_INPUT CHUNKS_PER_PE int actual_num_workers; int** local_bucket_sizes_chunk; int ** my_local_key_counts; KEY_TYPE*** my_local_bucketed_keys_chunk; int** local_bucket_offsets_chunk; /* * This variable sets the maximum number of chunks allowed * to participate in computation per pe. 
*/ int CHUNKS_PER_PE=1; #define GET_VIRTUAL_RANK(rank, chunk) ((rank * actual_num_workers) + (chunk)) #define SHMEM_BARRIER_AT_START { timer_start(&timers[TIMER_BARRIER_START]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_START]); } #define SHMEM_BARRIER_AT_EXCHANGE { timer_start(&timers[TIMER_BARRIER_EXCHANGE]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_EXCHANGE]); } #define SHMEM_BARRIER_AT_END { timer_start(&timers[TIMER_BARRIER_END]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_END]); } // This is done due to current limitation that entrypoint function // cannot accept arguments. This will be resolved in future version of // AsyncSHMEM int m_argc; char** m_argv; #define EXTRA_STATS #ifdef EXTRA_STATS float avg_time=0, avg_time_all2all = 0; #endif // #define KEY_BUFFER_SIZE (1uLL<<28uLL) #define KEY_BUFFER_SIZE ((1uLL<<28uLL) + 70000) // The receive array for the All2All exchange // KEY_TYPE my_bucket_keys[KEY_BUFFER_SIZE]; KEY_TYPE *my_bucket_keys; #ifdef PERMUTE int * permute_array; #endif void entrypoint(void *arg) { char * log_file = parse_params(m_argc, m_argv); init_shmem_sync_array(pSync); bucket_sort(); log_times(log_file); //return err; } int main (int argc, char ** argv) { shmem_init (); m_argc = argc; m_argv = argv; #ifdef EXTRA_STATS _timer_t stage_time; if(shmem_my_pe() == 0) { printf("\n-----\nmkdir timedrun fake\n\n"); timer_start(&stage_time); } #endif #if defined(_SHMEM_WORKERS) shmem_workers_init(entrypoint, NULL); #else entrypoint(NULL); #endif #ifdef EXTRA_STATS if(shmem_my_pe() == 0) { just_timer_stop(&stage_time); double tTime = ( stage_time.stop.tv_sec - stage_time.start.tv_sec ) + ( stage_time.stop.tv_nsec - stage_time.start.tv_nsec )/1E9; avg_time *= 1000; avg_time_all2all *= 1000; printf("\n============================ MMTk Statistics Totals ============================\n"); if(NUM_ITERATIONS == 1) { //TODO: fix time calculation below for more number of iterations 
printf("time.mu\tt.ATA_KEYS\tt.MAKE_INPUT\tt.COUNT_BUCKET_SIZES\tt.BUCKETIZE\tt.COMPUTE_OFFSETS\tt.LOCAL_SORT\tBARRIER_AT_START\tBARRIER_AT_EXCHANGE\tBARRIER_AT_END\tnWorkers\tnPEs\n"); double TIMES[TIMER_NTIMERS]; memset(TIMES, 0x00, sizeof(double) * TIMER_NTIMERS); for(uint64_t i=0; i<NUM_PES; i++) { for(int t = 0; t < TIMER_NTIMERS; ++t){ if(timers[t].all_times != NULL){ TIMES[t] += timers[t].all_times[i]; } } } for(int t = 0; t < TIMER_NTIMERS; ++t){ printf("%.3f\t", (TIMES[t]/NUM_PES)*1000); } printf("%d\t%d\n",actual_num_workers,NUM_PES); printf("Total time: %.3f\n",(TIMES[0]/NUM_PES)*1000); } else { printf("time.mu\ttimeAll2All\tnWorkers\tnPEs\n"); printf("%.3f\t%.3f\t%d\t%d\n",avg_time,avg_time_all2all,actual_num_workers,NUM_PES); printf("Total time: %.3f\n",avg_time); } printf("------------------------------ End MMTk Statistics -----------------------------\n"); printf("===== TEST PASSED in %.3f msec =====\n",(tTime*1000)); } #endif shmem_finalize (); return 0; } // Parses all of the command line input and definitions in params.h // to set all necessary runtime values and options static char * parse_params(const int argc, char ** argv) { if(argc != 3) { if( shmem_my_pe() == 0){ printf("Usage: \n"); printf(" ./%s <total num keys(strong) | keys per pe(weak)> <log_file>\n",argv[0]); } shmem_finalize(); exit(1); } const char* chunks_env = getenv("ISX_PE_CHUNKS"); CHUNKS_PER_PE = chunks_env ? 
atoi(chunks_env) : 1; #if defined(_OPENMP) #pragma omp parallel actual_num_workers = omp_get_num_threads(); #elif defined(_SHMEM_WORKERS) actual_num_workers = shmem_n_workers(); #else CHUNKS_PER_PE = 1; actual_num_workers = 1; #endif NUM_PES = (uint64_t) shmem_n_pes(); MAX_KEY_VAL = DEFAULT_MAX_KEY; NUM_BUCKETS = NUM_PES; BUCKET_WIDTH = (uint64_t) ceil((double)MAX_KEY_VAL/NUM_BUCKETS); char * log_file = argv[2]; char scaling_msg[64]; switch(SCALING_OPTION){ case STRONG: { TOTAL_KEYS = (uint64_t) atoi(argv[1]); NUM_KEYS_PER_PE = (uint64_t) ceil((double)TOTAL_KEYS/NUM_PES); sprintf(scaling_msg,"STRONG"); break; } case WEAK: { NUM_KEYS_PER_PE = (uint64_t) (atoi(argv[1])) * actual_num_workers; sprintf(scaling_msg,"WEAK"); break; } case WEAK_ISOBUCKET: { NUM_KEYS_PER_PE = (uint64_t) (atoi(argv[1])) * actual_num_workers; BUCKET_WIDTH = ISO_BUCKET_WIDTH; MAX_KEY_VAL = (uint64_t) (NUM_PES * actual_num_workers * BUCKET_WIDTH); sprintf(scaling_msg,"WEAK_ISOBUCKET"); break; } default: { if(shmem_my_pe() == 0){ printf("Invalid scaling option! 
See params.h to define the scaling option.\n"); } shmem_finalize(); exit(1); break; } } assert(NUM_KEYS_PER_PE % actual_num_workers == 0); assert(MAX_KEY_VAL > 0); assert(NUM_KEYS_PER_PE > 0); assert(NUM_PES > 0); assert(MAX_KEY_VAL > NUM_PES); assert(NUM_BUCKETS > 0); assert(BUCKET_WIDTH > 0); if(shmem_my_pe() == 0){ printf("ISx v%1d.%1d\n",MAJOR_VERSION_NUMBER,MINOR_VERSION_NUMBER); #ifdef PERMUTE printf("Random Permute Used in ATA.\n"); #endif printf(" Number of Keys per PE: %" PRIu64 "\n", NUM_KEYS_PER_PE); printf(" Number of Chunks per PE (ISX_PE_CHUNKS): %d\n",CHUNKS_PER_PE); #if defined(_OPENMP) printf(" OpenMP Version, total workers: %d\n",actual_num_workers); #elif defined(_SHMEM_WORKERS) printf(" AsyncSHMEM Version, total workers: %d\n",actual_num_workers); #else printf(" AsyncSHMEM Sequential version\n"); #endif printf(" Max Key Value: %" PRIu64 "\n", MAX_KEY_VAL); printf(" Bucket Width: %" PRIu64 "\n", BUCKET_WIDTH); printf(" Number of Iterations: %u\n", NUM_ITERATIONS); printf(" Number of PEs: %" PRIu64 "\n", NUM_PES); printf(" %s Scaling!\n",scaling_msg); } return log_file; } /* * The primary compute function for the bucket sort * Executes the sum of NUM_ITERATIONS + BURN_IN iterations, as defined in params.h * Only iterations after the BURN_IN iterations are timed * Only the final iteration calls the verification function */ static int bucket_sort(void) { int err = 0; init_timers(NUM_ITERATIONS); #ifdef PERMUTE create_permutation_array(); #endif my_bucket_keys = (KEY_TYPE*) shmem_malloc(KEY_BUFFER_SIZE * sizeof(KEY_TYPE)); assert(my_bucket_keys); // if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %llu bytes\n", KEY_BUFFER_SIZE * sizeof(KEY_TYPE)); my_local_key_counts = malloc(CHUNKS_COUNT_LOCAL_KEYS * sizeof(int*)); assert(my_local_key_counts); for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) { my_local_key_counts[i] = malloc(BUCKET_WIDTH * sizeof(int)); assert(my_local_key_counts[i]); } for(uint64_t i = 0; i < (NUM_ITERATIONS + BURN_IN); ++i) { 
for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) memset(my_local_key_counts[i], 0x00, BUCKET_WIDTH * sizeof(int)); local_bucket_sizes_chunk = malloc(CHUNKS_PER_PE* sizeof(int*)); assert(local_bucket_sizes_chunk); // Reset timers after burn in if(i == BURN_IN){ init_timers(NUM_ITERATIONS); } SHMEM_BARRIER_AT_START; timer_start(&timers[TIMER_TOTAL]); KEY_TYPE * my_keys = make_input(); int * local_bucket_sizes = count_local_bucket_sizes(my_keys); int * send_offsets; int * local_bucket_offsets = compute_local_bucket_offsets(local_bucket_sizes, &send_offsets); KEY_TYPE * my_local_bucketed_keys = bucketize_local_keys(my_keys, local_bucket_offsets); KEY_TYPE * my_bucket_keys = exchange_keys(send_offsets, local_bucket_sizes, my_local_bucketed_keys); my_bucket_size = receive_offset; count_local_keys(my_bucket_keys); SHMEM_BARRIER_AT_END; timer_stop(&timers[TIMER_TOTAL]); for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) { free(local_bucket_sizes_chunk[chunk]); } free(local_bucket_sizes_chunk); // Only the last iteration is verified if(i == NUM_ITERATIONS) { err = verify_results(my_bucket_keys); } // Reset receive_offset used in exchange_keys receive_offset = 0; free(my_local_bucketed_keys); free(my_keys); free(local_bucket_sizes); free(local_bucket_offsets); free(send_offsets); shmem_barrier_all(); } for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) free(my_local_key_counts[i]); free(my_local_key_counts); return err; } #if defined(_SHMEM_WORKERS) void make_input_async(void *args, int chunk) { KEY_TYPE * restrict const my_keys = *((KEY_TYPE **) args); const uint64_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_MAKE_INPUT; const uint64_t start_index = chunk * keys_per_chunk; const uint64_t max_index = start_index + keys_per_chunk; pcg32_random_t rng = seed_my_chunk(chunk); KEY_TYPE * restrict my_keys_1D = &(my_keys[start_index]); for(uint64_t i=start_index; i<max_index; i++) { *my_keys_1D = pcg32_boundedrand_r(&rng, MAX_KEY_VAL); my_keys_1D += 1; } } #endif /* * Generates uniformly random 
keys [0, MAX_KEY_VAL] on each rank using the time and rank * number as a seed */ static KEY_TYPE * make_input(void) { timer_start(&timers[TIMER_INPUT]); KEY_TYPE * restrict const my_keys = malloc(NUM_KEYS_PER_PE * sizeof(KEY_TYPE)); assert(my_keys); #if defined(_SHMEM_WORKERS) int lowBound = 0; int highBound = CHUNKS_MAKE_INPUT; int stride = 1; int tile_size = 1; int loop_dimension = 1; shmem_task_scope_begin(); shmem_parallel_for_nbi(make_input_async, (void*)(&my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE); shmem_task_scope_end(); #else #if defined(_OPENMP) int chunk; #pragma omp parallel for private(chunk) schedule (dynamic,1) #endif for(chunk=0; chunk<CHUNKS_MAKE_INPUT; chunk++) { const uint64_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_MAKE_INPUT; const uint64_t start_index = chunk * keys_per_chunk; const uint64_t max_index = start_index + keys_per_chunk; pcg32_random_t rng = seed_my_chunk(chunk); KEY_TYPE * restrict my_keys_1D = &(my_keys[start_index]); for(uint64_t i=start_index; i<max_index; i++) { *my_keys_1D = pcg32_boundedrand_r(&rng, MAX_KEY_VAL); my_keys_1D += 1; } } #endif timer_stop(&timers[TIMER_INPUT]); #ifdef DEBUG wait_my_turn(); char msg[1024]; const int my_rank = shmem_my_pe(); sprintf(msg,"Rank %d: Initial Keys: ", my_rank); for(uint64_t i = 0; i < NUM_KEYS_PER_PE; ++i){ if(i < PRINT_MAX) sprintf(msg + strlen(msg),"%d ", my_keys[i]); } sprintf(msg + strlen(msg),"\n"); printf("%s",msg); fflush(stdout); my_turn_complete(); #endif return my_keys; } #if defined(_SHMEM_WORKERS) void count_local_bucket_sizes_async(void* args, int chunk) { KEY_TYPE const * restrict const my_keys = (KEY_TYPE *) args; local_bucket_sizes_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int)); assert(local_bucket_sizes_chunk[chunk]); memset(local_bucket_sizes_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int)); int * restrict const local_bucket_sizes = local_bucket_sizes_chunk[chunk]; const uint32_t keys_per_chunk = 
NUM_KEYS_PER_PE / CHUNKS_PER_PE; const uint32_t start_index = chunk * keys_per_chunk; KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]); for(uint64_t i = 0; i < keys_per_chunk; ++i){ const uint32_t bucket_index = my_keys_1D[i]/BUCKET_WIDTH; local_bucket_sizes[bucket_index]++; } } #endif /* * Computes the size of each bucket by iterating all keys and incrementing * their corresponding bucket's size */ static inline int * count_local_bucket_sizes(KEY_TYPE const * restrict const my_keys) { int * restrict const local_bucket_sizes = malloc(NUM_BUCKETS * sizeof(int)); assert(local_bucket_sizes); memset(local_bucket_sizes, 0x00, NUM_BUCKETS * sizeof(int)); timer_start(&timers[TIMER_BCOUNT]); init_array(local_bucket_sizes, NUM_BUCKETS); if(NUM_BUCKETS == 1) { local_bucket_sizes[0] = NUM_KEYS_PER_PE; } else { #if defined(_SHMEM_WORKERS) int lowBound = 0; int highBound = CHUNKS_PER_PE; int stride = 1; int tile_size = 1; int loop_dimension = 1; shmem_task_scope_begin(); shmem_parallel_for_nbi(count_local_bucket_sizes_async, (void*)(my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE); shmem_task_scope_end(); #else #if defined(_OPENMP) int chunk; #pragma omp parallel for private(chunk) schedule (dynamic,1) #endif for(chunk=0; chunk<CHUNKS_PER_PE; chunk++) { local_bucket_sizes_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int)); assert(local_bucket_sizes_chunk[chunk]); memset(local_bucket_sizes_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int)); int * restrict const local_bucket_sizes = local_bucket_sizes_chunk[chunk]; const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE; const uint32_t start_index = chunk * keys_per_chunk; KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]); for(uint64_t i = 0; i < keys_per_chunk; ++i){ const uint32_t bucket_index = my_keys_1D[i]/BUCKET_WIDTH; local_bucket_sizes[bucket_index]++; } } #endif for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) { for(int i=0; 
i<NUM_BUCKETS; i++) { local_bucket_sizes[i] += local_bucket_sizes_chunk[chunk][i]; } } } timer_stop(&timers[TIMER_BCOUNT]); #ifdef DEBUG wait_my_turn(); char msg[1024]; const int my_rank = shmem_my_pe(); sprintf(msg,"Rank %d: local bucket sizes: ", my_rank); for(uint64_t i = 0; i < NUM_BUCKETS; ++i){ if(i < PRINT_MAX) sprintf(msg + strlen(msg),"%d ", local_bucket_sizes[i]); } sprintf(msg + strlen(msg),"\n"); printf("%s",msg); fflush(stdout); my_turn_complete(); #endif return local_bucket_sizes; } /* * Computes the prefix scan of the bucket sizes to determine the starting locations * of each bucket in the local bucketed array * Stores a copy of the bucket offsets for use in exchanging keys because the * original bucket_offsets array is modified in the bucketize function */ static inline int * compute_local_bucket_offsets(int const * restrict const local_bucket_sizes, int ** restrict send_offsets) { int * restrict const local_bucket_offsets = malloc(NUM_BUCKETS * sizeof(int)); assert(local_bucket_offsets); timer_start(&timers[TIMER_BOFFSET]); (*send_offsets) = malloc(NUM_BUCKETS * sizeof(int)); assert(*send_offsets); // NOTE: This is a very small computation and hence we are not parallelizing this local_bucket_offsets[0] = 0; (*send_offsets)[0] = 0; int temp = 0; for(uint64_t i = 1; i < NUM_BUCKETS; i++){ temp = local_bucket_offsets[i-1] + local_bucket_sizes[i-1]; local_bucket_offsets[i] = temp; (*send_offsets)[i] = temp; } timer_stop(&timers[TIMER_BOFFSET]); #ifdef DEBUG wait_my_turn(); char msg[1024]; const int my_rank = shmem_my_pe(); sprintf(msg,"Rank %d: local bucket offsets: ", my_rank); for(uint64_t i = 0; i < NUM_BUCKETS; ++i){ if(i < PRINT_MAX) sprintf(msg + strlen(msg),"%d ", local_bucket_offsets[i]); } sprintf(msg + strlen(msg),"\n"); printf("%s",msg); fflush(stdout); my_turn_complete(); #endif return local_bucket_offsets; } #if defined(_SHMEM_WORKERS) void bucketize_local_keys_async(void* args, int chunk) { KEY_TYPE const * restrict const my_keys = 
(KEY_TYPE*) args; my_local_bucketed_keys_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(KEY_TYPE*)); assert(my_local_bucketed_keys_chunk[chunk]); local_bucket_offsets_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int)); assert(local_bucket_offsets_chunk[chunk]); memset(local_bucket_offsets_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int)); for(int bucket=0; bucket<NUM_BUCKETS; bucket++) { my_local_bucketed_keys_chunk[chunk][bucket] = malloc(sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]); assert(my_local_bucketed_keys_chunk[chunk][bucket]); } const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE; const uint32_t start_index = chunk * keys_per_chunk; KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]); int * restrict local_bucket_offsets_chunk_1D = local_bucket_offsets_chunk[chunk]; int const * restrict const local_bucket_sizes_chunk_1D = local_bucket_sizes_chunk[chunk]; KEY_TYPE** restrict my_local_bucketed_keys_chunk_2D = my_local_bucketed_keys_chunk[chunk]; for(uint64_t i = 0; i < keys_per_chunk; ++i){ const KEY_TYPE key = my_keys_1D[i]; const uint32_t bucket_index = key / BUCKET_WIDTH; uint32_t index = local_bucket_offsets_chunk_1D[bucket_index]++; assert(index < local_bucket_sizes_chunk_1D[bucket_index]); my_local_bucketed_keys_chunk_2D[bucket_index][index] = key; } } #endif /* * Places local keys into their corresponding local bucket. * The contents of each bucket are not sorted. 
*/ static inline KEY_TYPE * bucketize_local_keys(KEY_TYPE const * restrict const my_keys, int * restrict const local_bucket_offsets) { KEY_TYPE * restrict const my_local_bucketed_keys = malloc(NUM_KEYS_PER_PE * sizeof(KEY_TYPE)); assert(my_local_bucketed_keys); timer_start(&timers[TIMER_BUCKETIZE]); my_local_bucketed_keys_chunk = malloc(CHUNKS_PER_PE* sizeof(KEY_TYPE**)); assert(my_local_bucketed_keys_chunk); local_bucket_offsets_chunk = (int**) malloc(CHUNKS_PER_PE* sizeof(int*)); assert(local_bucket_offsets_chunk); #if defined(_SHMEM_WORKERS) int lowBound = 0; int highBound = CHUNKS_PER_PE; int stride = 1; int tile_size = 1; int loop_dimension = 1; shmem_task_scope_begin(); shmem_parallel_for_nbi(bucketize_local_keys_async, (void*)(my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE); shmem_task_scope_end(); #else #if defined(_OPENMP) int chunk; #pragma omp parallel for private(chunk) schedule (dynamic,1) #endif for(chunk=0; chunk<CHUNKS_PER_PE; chunk++) { my_local_bucketed_keys_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(KEY_TYPE*)); assert(my_local_bucketed_keys_chunk[chunk]); local_bucket_offsets_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int)); assert(local_bucket_offsets_chunk[chunk]); memset(local_bucket_offsets_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int)); for(int bucket=0; bucket<NUM_BUCKETS; bucket++) { my_local_bucketed_keys_chunk[chunk][bucket] = malloc(sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]); assert(my_local_bucketed_keys_chunk[chunk][bucket]); } const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE; const uint32_t start_index = chunk * keys_per_chunk; KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]); int * restrict local_bucket_offsets_chunk_1D = local_bucket_offsets_chunk[chunk]; int const * restrict const local_bucket_sizes_chunk_1D = local_bucket_sizes_chunk[chunk]; KEY_TYPE** restrict my_local_bucketed_keys_chunk_2D = 
my_local_bucketed_keys_chunk[chunk]; for(uint64_t i = 0; i < keys_per_chunk; ++i){ const KEY_TYPE key = my_keys_1D[i]; const uint32_t bucket_index = key / BUCKET_WIDTH; uint32_t index = local_bucket_offsets_chunk_1D[bucket_index]++; assert(index < local_bucket_sizes_chunk_1D[bucket_index]); my_local_bucketed_keys_chunk_2D[bucket_index][index] = key; } } #endif for(int bucket=0; bucket<NUM_BUCKETS; bucket++) { uint32_t index = local_bucket_offsets[bucket]; for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) { memcpy(&(my_local_bucketed_keys[index]), my_local_bucketed_keys_chunk[chunk][bucket], sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]); index += local_bucket_sizes_chunk[chunk][bucket]; } local_bucket_offsets[bucket] = index; } // free the memory for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) { for(int bucket=0; bucket<NUM_BUCKETS; bucket++) { free(my_local_bucketed_keys_chunk[chunk][bucket]); } free(local_bucket_offsets_chunk[chunk]); free(my_local_bucketed_keys_chunk[chunk]); } free(my_local_bucketed_keys_chunk); free(local_bucket_offsets_chunk); timer_stop(&timers[TIMER_BUCKETIZE]); #ifdef DEBUG wait_my_turn(); char msg[1024]; const int my_rank = shmem_my_pe(); sprintf(msg,"Rank %d: local bucketed keys: ", my_rank); for(uint64_t i = 0; i < NUM_KEYS_PER_PE; ++i){ if(i < PRINT_MAX) sprintf(msg + strlen(msg),"%d ", my_local_bucketed_keys[i]); } sprintf(msg + strlen(msg),"\n"); printf("%s",msg); fflush(stdout); my_turn_complete(); #endif return my_local_bucketed_keys; } #if defined(_SHMEM_WORKERS) typedef struct exchange_keys_async_t { KEY_TYPE const * restrict const my_local_bucketed_keys; const long long int max_bucket_size; const long long int send_offsets_start; const long long int write_offset_into_self; } exchange_keys_async_t; void exchange_keys_async(void* args, int chunk) { exchange_keys_async_t* arg = (exchange_keys_async_t*) args; KEY_TYPE const * restrict const my_local_bucketed_keys = arg->my_local_bucketed_keys; const long long int 
max_bucket_size = arg->max_bucket_size; const long long int send_offsets_start = arg->send_offsets_start; const long long int write_offset_into_self = arg->write_offset_into_self; const long long int chunks = max_bucket_size / actual_num_workers; const long long int write_offset_into_self_worker = write_offset_into_self + (chunk * chunks); const long long int send_offsets_start_worker = send_offsets_start + (chunk * chunks); long long int send_size = chunks; if(chunk+1 == actual_num_workers) { long long int leftover = max_bucket_size - (chunks * actual_num_workers); send_size += leftover; } memcpy(&my_bucket_keys[write_offset_into_self_worker],&my_local_bucketed_keys[send_offsets_start_worker], send_size*sizeof(KEY_TYPE)); } #endif /* * Each PE sends the contents of its local buckets to the PE that owns that bucket. */ static inline KEY_TYPE * exchange_keys(int const * restrict const send_offsets, int const * restrict const local_bucket_sizes, KEY_TYPE const * restrict const my_local_bucketed_keys) { timer_start(&timers[TIMER_ATA_KEYS]); const int my_rank = shmem_my_pe(); unsigned int total_keys_sent = 0; unsigned long long start_time = current_time_ns(); for(uint64_t i = 0; i < NUM_PES; ++i){ #ifdef PERMUTE const int target_pe = permute_array[i]; #elif INCAST const int target_pe = i; #else const int target_pe = (my_rank + i) % NUM_PES; #endif // Local keys already written with memcpy if(target_pe == my_rank){ continue; } const int read_offset_from_self = send_offsets[target_pe]; const int my_send_size = local_bucket_sizes[target_pe]; const long long int write_offset_into_target = shmem_longlong_fadd(&receive_offset, (long long int)my_send_size, target_pe); if (write_offset_into_target + my_send_size > KEY_BUFFER_SIZE) { fprintf(stderr, "%llu %llu\n", write_offset_into_target + my_send_size, KEY_BUFFER_SIZE); exit(1); } if (read_offset_from_self + my_send_size > NUM_KEYS_PER_PE) { fprintf(stderr, "%llu %llu\n", read_offset_from_self + my_send_size, 
NUM_KEYS_PER_PE); exit(1); } if (shmem_my_pe() == 0) { fprintf(stderr, "Putting %llu integers to PE %d\n", my_send_size, target_pe); } shmem_int_put(&(my_bucket_keys[write_offset_into_target]), &(my_local_bucketed_keys[read_offset_from_self]), my_send_size, target_pe); #ifdef DEBUG printf("Rank: %d Target: %d Offset into target: %lld Offset into myself: %d Send Size: %d\n", my_rank, target_pe, write_offset_into_target, read_offset_from_self, my_send_size); #endif total_keys_sent += my_send_size; } unsigned long long intermediate_time = current_time_ns(); // Keys destined for local key buffer can be written with memcpy const long long int write_offset_into_self = shmem_longlong_fadd(&receive_offset, (long long int)local_bucket_sizes[my_rank], my_rank); const long long int send_offsets_start = send_offsets[my_rank]; const long long int chunks = local_bucket_sizes[my_rank] / actual_num_workers; const long long int max_bucket_size = local_bucket_sizes[my_rank]; #if defined(_SHMEM_WORKERS) int lowBound = 0; int highBound = actual_num_workers; int stride = 1; int tile_size = 1; int loop_dimension = 1; exchange_keys_async_t args = {my_local_bucketed_keys, max_bucket_size, send_offsets_start, write_offset_into_self}; shmem_task_scope_begin(); shmem_parallel_for_nbi(exchange_keys_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE); shmem_task_scope_end(); #else #if defined(_OPENMP) int chunk; #pragma omp parallel for private(chunk) schedule (dynamic,1) #endif for(chunk=0; chunk<actual_num_workers; chunk++) { const long long int write_offset_into_self_worker = write_offset_into_self + (chunk * chunks); const long long int send_offsets_start_worker = send_offsets_start + (chunk * chunks); long long int send_size = chunks; if(chunk+1 == actual_num_workers) { long long int leftover = max_bucket_size - (chunks * actual_num_workers); send_size += leftover; } 
memcpy(&my_bucket_keys[write_offset_into_self_worker],&my_local_bucketed_keys[send_offsets_start_worker], send_size*sizeof(KEY_TYPE)); } #endif unsigned long long end_time = current_time_ns(); if (shmem_my_pe() == 0) { fprintf(stderr, "Time slices = %llu %llu\n", intermediate_time - start_time, end_time - intermediate_time); } #ifdef BARRIER_ATA SHMEM_BARRIER_AT_EXCHANGE; #endif timer_stop(&timers[TIMER_ATA_KEYS]); timer_count(&timers[TIMER_ATA_KEYS], total_keys_sent); #ifdef DEBUG wait_my_turn(); char msg[1024]; sprintf(msg,"Rank %d: Bucket Size %lld | Total Keys Sent: %u | Keys after exchange:", my_rank, receive_offset, total_keys_sent); for(long long int i = 0; i < receive_offset; ++i){ if(i < PRINT_MAX) sprintf(msg + strlen(msg),"%d ", my_bucket_keys[i]); } sprintf(msg + strlen(msg),"\n"); printf("%s",msg); fflush(stdout); my_turn_complete(); #endif return my_bucket_keys; } #if defined(_SHMEM_WORKERS) typedef struct count_local_keys_async_t { int max_chunks; int my_min_key; } count_local_keys_async_t; void count_local_keys_async(void* args, int chunk) { count_local_keys_async_t* arg = (count_local_keys_async_t*) args; const int max_chunks = arg->max_chunks; const int my_min_key = arg->my_min_key; const int start_index = chunk * max_chunks; int * restrict my_local_key_counts_1D = my_local_key_counts[chunk]; int const * restrict const my_bucket_keys_1D = &(my_bucket_keys[start_index]); for(int i=0; i<max_chunks; i++) { const unsigned int key_index = my_bucket_keys_1D[i] - my_min_key; assert(my_bucket_keys_1D[i] >= my_min_key); assert(key_index < BUCKET_WIDTH); my_local_key_counts_1D[key_index]++; } } #endif /* * Counts the occurence of each key in my bucket. * Key indices into the count array are the key's value minus my bucket's * minimum key value to allow indexing from 0. 
* my_bucket_keys: All keys in my bucket unsorted [my_rank * BUCKET_WIDTH, (my_rank+1)*BUCKET_WIDTH) */ static inline int* count_local_keys(KEY_TYPE const * restrict const my_bucket_keys) { timer_start(&timers[TIMER_SORT]); const int my_rank = shmem_my_pe(); const int my_min_key = my_rank * BUCKET_WIDTH; const int max_chunks = (int) my_bucket_size / actual_num_workers; #if defined(_SHMEM_WORKERS) int lowBound = 0; int highBound = CHUNKS_COUNT_LOCAL_KEYS; int stride = 1; int tile_size = 1; int loop_dimension = 1; count_local_keys_async_t args = {max_chunks, my_min_key}; shmem_task_scope_begin(); shmem_parallel_for_nbi(count_local_keys_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE); shmem_task_scope_end(); #else #if defined(_OPENMP) int chunk; #pragma omp parallel for private(chunk) schedule (static,1) #endif for(chunk=0; chunk<CHUNKS_COUNT_LOCAL_KEYS; chunk++) { const int start_index = chunk * max_chunks; int * restrict my_local_key_counts_1D = my_local_key_counts[chunk]; int const * restrict const my_bucket_keys_1D = &(my_bucket_keys[start_index]); for(int i=0; i<max_chunks; i++) { const unsigned int key_index = my_bucket_keys_1D[i] - my_min_key; assert(my_bucket_keys_1D[i] >= my_min_key); assert(key_index < BUCKET_WIDTH); my_local_key_counts_1D[key_index]++; } } #endif //sequential part here const int leftover = my_bucket_size - (max_chunks * CHUNKS_COUNT_LOCAL_KEYS); if(leftover) { const int chunk = CHUNKS_COUNT_LOCAL_KEYS - 1; for(int i=(my_bucket_size-leftover); i<my_bucket_size; i++) { const unsigned int key_index = my_bucket_keys[i] - my_min_key; assert(my_bucket_keys[i] >= my_min_key); assert(key_index < BUCKET_WIDTH); my_local_key_counts[chunk][key_index]++; } } timer_stop(&timers[TIMER_SORT]); #ifdef DEBUG wait_my_turn(); char msg[4096]; sprintf(msg,"Rank %d: Bucket Size %lld | Local Key Counts:", my_rank, my_bucket_size); for(int chunk=0; chunk<actual_num_workers; chunk++) { for(uint64_t i 
= 0; i < BUCKET_WIDTH; ++i){ if(i < PRINT_MAX) sprintf(msg + strlen(msg),"%d ", my_local_key_counts[chunk][i]); } } sprintf(msg + strlen(msg),"\n"); printf("%s",msg); fflush(stdout); my_turn_complete(); #endif return NULL; } typedef struct verify_results_async_t { int max_chunks; int my_min_key; int my_max_key; } verify_results_async_t; void verify_results_async(void* args, int chunk) { verify_results_async_t* arg = (verify_results_async_t*) args; const int max_chunks = arg->max_chunks; const int my_min_key = arg->my_min_key; const int my_max_key = arg->my_max_key; const int start_index = chunk * max_chunks; const int max_index = start_index + max_chunks; for(int i=start_index; i<max_index; i++) { const int key = my_bucket_keys[i]; if((key < my_min_key) || (key > my_max_key)){ printf("Rank %d Failed Verification!\n",shmem_my_pe()); printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key); } } } /* * Verifies the correctness of the sort. * Ensures all keys are within a PE's bucket boundaries. * Ensures the final number of keys is equal to the initial. 
*/ static int verify_results(KEY_TYPE const * restrict const my_local_keys) { shmem_barrier_all(); int error = 0; const int my_rank = shmem_my_pe(); const int my_min_key = my_rank * BUCKET_WIDTH; const int my_max_key = (my_rank+1) * BUCKET_WIDTH - 1; const int max_chunks = (int) my_bucket_size / actual_num_workers; #if defined(_SHMEM_WORKERS) int lowBound = 0; int highBound = actual_num_workers; int stride = 1; int tile_size = 1; int loop_dimension = 1; verify_results_async_t args = {max_chunks, my_min_key, my_max_key}; shmem_task_scope_begin(); shmem_parallel_for_nbi(verify_results_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE); shmem_task_scope_end(); #else #if defined(_OPENMP) int chunk; #pragma omp parallel for private(chunk) schedule (static,1) #endif // Verify all keys are within bucket boundaries for(chunk=0; chunk<actual_num_workers; chunk++) { const int start_index = chunk * max_chunks; const int max_index = start_index + max_chunks; for(int i=start_index; i<max_index; i++) { const int key = my_bucket_keys[i]; if((key < my_min_key) || (key > my_max_key)){ printf("Rank %d Failed Verification!\n",shmem_my_pe()); printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key); } } } #endif //sequential part here const int leftover = my_bucket_size - (max_chunks * actual_num_workers); if(leftover) { for(int i=(my_bucket_size-leftover); i<my_bucket_size; i++) { const int key = my_local_keys[i]; if((key < my_min_key) || (key > my_max_key)){ printf("Rank %d Failed Verification!\n",my_rank); printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key); error = 1; } } } // Verify the sum of the key population equals the expected bucket size long long int bucket_size_test = 0; for(int chunk=0; chunk<CHUNKS_COUNT_LOCAL_KEYS; chunk++) { for(uint64_t i = 0; i < BUCKET_WIDTH; ++i){ bucket_size_test += my_local_key_counts[chunk][i]; } } if(bucket_size_test != 
my_bucket_size){ printf("Rank %d Failed Verification!\n",my_rank); printf("Actual Bucket Size: %lld Should be %lld\n", bucket_size_test, my_bucket_size); error = 1; } // Verify the final number of keys equals the initial number of keys static long long int total_num_keys = 0; shmem_longlong_sum_to_all(&total_num_keys, &my_bucket_size, 1, 0, 0, NUM_PES, llWrk, pSync); shmem_barrier_all(); if(total_num_keys != (long long int)(NUM_KEYS_PER_PE * NUM_PES)){ if(my_rank == ROOT_PE){ printf("Verification Failed!\n"); printf("Actual total number of keys: %lld Expected %" PRIu64 "\n", total_num_keys, NUM_KEYS_PER_PE * NUM_PES ); error = 1; } } return error; } /* * Gathers all the timing information from each PE and prints * it to a file. All information from a PE is printed as a row in a tab seperated file */ static void log_times(char * log_file) { FILE * fp = NULL; for(uint64_t i = 0; i < TIMER_NTIMERS; ++i){ timers[i].all_times = gather_rank_times(&timers[i]); timers[i].all_counts = gather_rank_counts(&timers[i]); } if(shmem_my_pe() == ROOT_PE) { int print_names = 0; if(file_exists(log_file) != 1){ print_names = 1; } if((fp = fopen(log_file, "a+b"))==NULL){ perror("Error opening log file:"); exit(1); } if(print_names == 1){ print_run_info(fp); print_timer_names(fp); } print_timer_values(fp); report_summary_stats(); fclose(fp); } } /* * Computes the average total time and average all2all time and prints it to the command line */ static void report_summary_stats(void) { if(timers[TIMER_TOTAL].seconds_iter > 0) { const uint32_t num_records = NUM_PES * timers[TIMER_TOTAL].seconds_iter; double temp = 0.0; for(uint64_t i = 0; i < num_records; ++i){ temp += timers[TIMER_TOTAL].all_times[i]; } #ifdef EXTRA_STATS avg_time = temp/num_records; #endif printf("Average total time (per PE): %f seconds\n", temp/num_records); } if(timers[TIMER_ATA_KEYS].seconds_iter >0) { const uint32_t num_records = NUM_PES * timers[TIMER_ATA_KEYS].seconds_iter; double temp = 0.0; for(uint64_t i = 0; i < 
num_records; ++i){ temp += timers[TIMER_ATA_KEYS].all_times[i]; } #ifdef EXTRA_STATS avg_time_all2all = temp/num_records; #endif printf("Average all2all time (per PE): %f seconds\n", temp/num_records); } } /* * Prints all the labels for each timer as a row to the file specified by 'fp' */ static void print_timer_names(FILE * fp) { for(uint64_t i = 0; i < TIMER_NTIMERS; ++i){ if(timers[i].seconds_iter > 0){ fprintf(fp, "%s (sec)\t", timer_names[i]); } if(timers[i].count_iter > 0){ fprintf(fp, "%s_COUNTS\t", timer_names[i]); } } fprintf(fp,"\n"); } /* * Prints all the relevant runtime parameters as a row to the file specified by 'fp' */ static void print_run_info(FILE * fp) { fprintf(fp,"SHMEM\t"); fprintf(fp,"NUM_PES %" PRIu64 "\t", NUM_PES); fprintf(fp,"Max_Key %" PRIu64 "\t", MAX_KEY_VAL); fprintf(fp,"Num_Iters %u\t", NUM_ITERATIONS); switch(SCALING_OPTION){ case STRONG: { fprintf(fp,"Strong Scaling: %" PRIu64 " total keys\t", NUM_KEYS_PER_PE * NUM_PES); break; } case WEAK: { fprintf(fp,"Weak Scaling: %" PRIu64 " keys per PE\t", NUM_KEYS_PER_PE); break; } case WEAK_ISOBUCKET: { fprintf(fp,"Weak Scaling Constant Bucket Width: %" PRIu64 "u keys per PE \t", NUM_KEYS_PER_PE); fprintf(fp,"Constant Bucket Width: %" PRIu64 "\t", BUCKET_WIDTH); break; } default: { fprintf(fp,"Invalid Scaling Option!\t"); break; } } #ifdef PERMUTE fprintf(fp,"Randomized All2All\t"); #elif INCAST fprintf(fp,"Incast All2All\t"); #else fprintf(fp,"Round Robin All2All\t"); #endif fprintf(fp,"\n"); } /* * Prints all of the timining information for an individual PE as a row * to the file specificed by 'fp'. 
*/ static void print_timer_values(FILE * fp) { unsigned int num_records = NUM_PES * NUM_ITERATIONS; for(uint64_t i = 0; i < num_records; ++i) { for(int t = 0; t < TIMER_NTIMERS; ++t){ if(timers[t].all_times != NULL){ fprintf(fp,"%f\t", timers[t].all_times[i]); } if(timers[t].all_counts != NULL){ fprintf(fp,"%u\t", timers[t].all_counts[i]); } } fprintf(fp,"\n"); } } /* * Aggregates the per PE timing information */ static double * gather_rank_times(_timer_t * const timer) { if(timer->seconds_iter > 0) { assert(timer->seconds_iter == timer->num_iters); const unsigned int num_records = NUM_PES * timer->seconds_iter; #ifdef OPENSHMEM_COMPLIANT double * my_times = shmem_malloc(timer->seconds_iter * sizeof(double)); assert(my_times); if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %llu bytes\n", timer->seconds_iter * sizeof(double)); #else double * my_times = shmalloc(timer->seconds_iter * sizeof(double)); #endif memcpy(my_times, timer->seconds, timer->seconds_iter * sizeof(double)); #ifdef OPENSHMEM_COMPLIANT double * all_times = shmem_malloc( num_records * sizeof(double)); assert(all_times); if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %llu bytes\n", num_records * sizeof(double)); #else double * all_times = shmalloc( num_records * sizeof(double)); #endif shmem_barrier_all(); shmem_fcollect64(all_times, my_times, timer->seconds_iter, 0, 0, NUM_PES, pSync); shmem_barrier_all(); #ifdef OPENSHMEM_COMPLIANT shmem_free(my_times); #else shfree(my_times); #endif return all_times; } else{ return NULL; } } /* * Aggregates the per PE timing 'count' information */ static unsigned int * gather_rank_counts(_timer_t * const timer) { if(timer->count_iter > 0){ const unsigned int num_records = NUM_PES * timer->num_iters; #ifdef OPENSHMEM_COMPLIANT unsigned int * my_counts = shmem_malloc(timer->num_iters * sizeof(unsigned int)); assert(my_counts); if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %llu bytes\n", timer->num_iters * sizeof(unsigned int)); #else unsigned int * 
my_counts = shmalloc(timer->num_iters * sizeof(unsigned int)); #endif memcpy(my_counts, timer->count, timer->num_iters*sizeof(unsigned int)); #ifdef OPENSHMEM_COMPLIANT unsigned int * all_counts = shmem_malloc( num_records * sizeof(unsigned int) ); assert(all_counts); if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %llu bytes\n", num_records * sizeof(unsigned int)); #else unsigned int * all_counts = shmalloc( num_records * sizeof(unsigned int) ); #endif shmem_barrier_all(); shmem_collect32(all_counts, my_counts, timer->num_iters, 0, 0, NUM_PES, pSync); shmem_barrier_all(); #ifdef OPENSHMEM_COMPLIANT shmem_free(my_counts); #else shfree(my_counts); #endif return all_counts; } else{ return NULL; } } /* * Seeds each rank based on the worker number, rank and time */ static inline pcg32_random_t seed_my_chunk(int chunk) { const unsigned int my_rank = shmem_my_pe(); const unsigned int my_virtual_rank = GET_VIRTUAL_RANK(my_rank, chunk); pcg32_random_t rng; pcg32_srandom_r(&rng, (uint64_t) my_virtual_rank, (uint64_t) my_virtual_rank ); return rng; } /* * Seeds each rank based on the rank number and time */ static inline pcg32_random_t seed_my_rank(void) { const unsigned int my_rank = shmem_my_pe(); pcg32_random_t rng; pcg32_srandom_r(&rng, (uint64_t) my_rank, (uint64_t) my_rank ); return rng; } /* * Initializes the work array required for SHMEM collective functions */ static void init_shmem_sync_array(long * restrict const pSync) { for(uint64_t i = 0; i < _SHMEM_REDUCE_SYNC_SIZE; ++i){ pSync[i] = _SHMEM_SYNC_VALUE; } shmem_barrier_all(); } /* * Tests whether or not a file exists. 
* Returns 1 if file exists * Returns 0 if file does not exist */ static int file_exists(char * filename) { struct stat buffer; if(stat(filename,&buffer) == 0){ return 1; } else { return 0; } } #ifdef DEBUG static void wait_my_turn() { shmem_barrier_all(); whose_turn = 0; shmem_barrier_all(); const int my_rank = shmem_my_pe(); shmem_int_wait_until((int*)&whose_turn, SHMEM_CMP_EQ, my_rank); sleep(1); } static void my_turn_complete() { const int my_rank = shmem_my_pe(); const int next_rank = my_rank+1; if(my_rank < (NUM_PES-1)){ // Last rank updates no one shmem_int_put((int *) &whose_turn, &next_rank, 1, next_rank); } shmem_barrier_all(); } #endif #ifdef PERMUTE /* * Creates a randomly ordered array of PEs used in the exchange_keys function */ static void create_permutation_array() { permute_array = (int *) malloc( NUM_PES * sizeof(int) ); assert(permute_array); for(uint64_t i = 0; i < NUM_PES; ++i){ permute_array[i] = i; } shuffle(permute_array, NUM_PES, sizeof(int)); } /* * Randomly shuffles a generic array */ static void shuffle(void * array, size_t n, size_t size) { char tmp[size]; char * arr = array; size_t stride = size * sizeof(char); if(n > 1){ for(size_t i = 0; i < (n - 1); ++i){ size_t rnd = (size_t) rand(); size_t j = i + rnd/(RAND_MAX/(n - i) + 1); memcpy(tmp, arr + j*stride, size); memcpy(arr + j*stride, arr + i*stride, size); memcpy(arr + i*stride, tmp, size); } } } #endif
/* ===== file: mm-omp-ori.c ===== */
/**
 *
 * Matrix Multiplication - Shared-memory (OpenMP)
 *
 * CS3210
 *
 **/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>

int size;     // matrix dimension (matrices are size x size)
int threads;  // number of OpenMP threads requested (-1 = library default)

typedef struct
{
	float ** element;
} matrix;

/**
 * Returns wall-clock time in nanoseconds.
 **/
long long wall_clock_time()
{
#ifdef LINUX
	struct timespec tp;
	clock_gettime(CLOCK_REALTIME, &tp);
	return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000LL);
#else
	struct timeval tv;
	gettimeofday(&tv, NULL);
	// tv_usec is in microseconds: *1000 converts to nanoseconds
	return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000LL);
#endif
}

/**
 * Allocates memory for a matrix of size SIZE.
 * The memory is allocated row-major order, i.e.
 * elements from the same row are allocated at contiguous
 * memory addresses. Exits with an error on allocation failure.
 **/
void allocate_matrix(matrix* m)
{
	int i;

	// allocate array for all the rows
	m->element = (float**)malloc(sizeof(float*) * size);
	if (m->element == NULL) {
		fprintf(stderr, "Out of memory\n");
		exit(1);
	}

	// allocate an array for each row of the matrix
	for (i = 0; i < size; i++) {
		m->element[i] = (float*)malloc(sizeof(float) * size);
		if (m->element[i] == NULL) {
			fprintf(stderr, "Out of memory\n");
			exit(1);
		}
	}
}

/**
 * Free the memory allocated to a matrix.
 **/
void free_matrix(matrix* m)
{
	int i;
	for (i = 0; i < size; i++) {
		free(m->element[i]);
	}
	free(m->element);
}

/**
 * Initializes the elements of the matrix with
 * random values between 0 and 9
 **/
void init_matrix(matrix m)
{
	int i, j;
	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++) {
			m.element[i][j] = rand() % 10;
		}
}

/**
 * Initializes the elements of the matrix with
 * element 0.
 **/
void init_matrix_zero(matrix m)
{
	int i, j;
	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++) {
			m.element[i][j] = 0.0;
		}
}

/**
 * Multiplies matrix @a with matrix @b, accumulating into matrix @result.
 * @result must be zero-initialized by the caller (see init_matrix_zero):
 * this routine only adds products on top of its current contents.
 *
 * O(n^3) algorithm. The loops run in i-k-j order so the innermost loop
 * streams through rows of both b and result sequentially (cache friendly).
 * For each result element the contributions are still added in ascending k,
 * so the floating-point result is identical to the naive i-j-k order.
 */
void mm(matrix a, matrix b, matrix result)
{
	int i, j, k;

	// Parallelize over the outer-most loop: each thread owns a set of
	// result rows, so no two threads write the same element.
	// a, b, result are shared; loop indices are private per thread.
	#pragma omp parallel for shared(a, b, result) private(i, j, k)
	for (i = 0; i < size; i++)
		for (k = 0; k < size; k++) {
			const float a_ik = a.element[i][k]; // loop-invariant for j
			for (j = 0; j < size; j++)
				result.element[i][j] += a_ik * b.element[k][j];
		}
}

void print_matrix(matrix m)
{
	int i, j;
	for (i = 0; i < size; i++) {
		printf("row %4d: ", i);
		for (j = 0; j < size; j++)
			printf("%6.2f  ", m.element[i][j]);
		printf("\n");
	}
}

/**
 * Allocates and initializes the operand matrices, times one parallel
 * matrix multiplication, and releases all matrix memory.
 **/
void work()
{
	matrix a, b, result;
	long long before, after;

	// Allocate memory for matrices
	allocate_matrix(&a);
	allocate_matrix(&b);
	allocate_matrix(&result);

	// Initialize matrix elements
	init_matrix(a);
	init_matrix(b);
	// BUG FIX: result comes straight from malloc and is uninitialized;
	// mm() accumulates with +=, so it must start from all zeros.
	init_matrix_zero(result);

	// Perform parallel matrix multiplication
	before = wall_clock_time();
	mm(a, b, result);
	after = wall_clock_time();
	fprintf(stderr, "Matrix multiplication took %1.2f seconds\n", ((float)(after - before))/1000000000);

	// Print the result matrix
	// print_matrix(result);

	// Release matrix memory (previously leaked)
	free_matrix(&a);
	free_matrix(&b);
	free_matrix(&result);
}

int main(int argc, char ** argv)
{
	srand(0);

	printf("Usage: %s <size> <threads>\n", argv[0]);

	if (argc >= 2)
		size = atoi(argv[1]);
	else
		size = 1024;

	if (argc >= 3)
		threads = atoi(argv[2]);
	else
		threads = -1;

	// Multiply the matrices
	if (threads != -1) {
		omp_set_num_threads(threads);
	}
	// Query the actual team size the runtime will use.
	#pragma omp parallel
	{
		threads = omp_get_num_threads();
	}
	printf("Matrix multiplication of size %d using %d threads\n", size, threads);
	work();

	return 0;
}
/* ===== file: image_random-inl.h ===== */
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file image_random-inl.h * \brief * \author */ #ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_ #define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_ #include <mxnet/base.h> #include <algorithm> #include <vector> #include <cmath> #include <limits> #include <utility> #include "../mxnet_op.h" #include "../operator_common.h" namespace mxnet { namespace op { namespace image { inline bool ToTensorShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TShape &shp = (*in_attrs)[0]; if (!shp.ndim()) return false; CHECK_EQ(shp.ndim(), 3) << "Input image must have shape (height, width, channels), but got " << shp; SHAPE_ASSIGN_CHECK(*out_attrs, 0, TShape({shp[2], shp[0], shp[1]})); return true; } inline bool ToTensorType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32); return (*in_attrs)[0] != -1; } inline void ToTensor(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, 
const std::vector<TBlob> &outputs) { CHECK_EQ(req[0], kWriteTo) << "`to_tensor` does not support inplace"; int length = inputs[0].shape_[0] * inputs[0].shape_[1]; int channel = inputs[0].shape_[2]; MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { float* output = outputs[0].dptr<float>(); DType* input = inputs[0].dptr<DType>(); for (int l = 0; l < length; ++l) { for (int c = 0; c < channel; ++c) { output[c*length + l] = static_cast<float>(input[l*channel + c]) / 255.0f; } } }); } // Normalize Operator // Parameter registration for image Normalize operator struct NormalizeParam : public dmlc::Parameter<NormalizeParam> { nnvm::Tuple<float> mean; nnvm::Tuple<float> std; DMLC_DECLARE_PARAMETER(NormalizeParam) { DMLC_DECLARE_FIELD(mean) .set_default(nnvm::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f}) .describe("Sequence of means for each channel. " "Default value is 0."); DMLC_DECLARE_FIELD(std) .set_default(nnvm::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f}) .describe("Sequence of standard deviations for each channel. 
" "Default value is 1."); } }; // Shape and Type inference for image Normalize operator // Shape inference inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed); const auto& dshape = (*in_attrs)[0]; if (!dshape.ndim()) return false; CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4)) << "Input tensor must have shape (channels, height, width), or " << "(N, channels, height, width), but got " << dshape; uint32_t nchannels; if (dshape.ndim() == 3) { nchannels = dshape[0]; CHECK(nchannels == 3 || nchannels == 1) << "The first dimension of input tensor must be the channel dimension with " << "either 1 or 3 elements, but got input with shape " << dshape; } else if (dshape.ndim() == 4) { nchannels = dshape[1]; CHECK(nchannels == 3 || nchannels == 1) << "The second dimension of input tensor must be the channel dimension with " << "either 1 or 3 elements, but got input with shape " << dshape; } CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels)) << "Invalid mean for input with shape " << dshape << ". mean must have either 1 or " << nchannels << " elements, but got " << param.mean; CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels) << "Invalid std for input with shape " << dshape << ". 
std must have either 1 or " << nchannels << " elements, but got " << param.std; SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } // Type Inference inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } template<int req> struct normalize_forward { template<typename DType> MSHADOW_XINLINE static void Map(int j, DType* out_data, const DType* in_data, const int i, const int length, const int step, const DType mean, const DType std_dev) { KERNEL_ASSIGN(out_data[step + i*length + j], req, (in_data[step + i*length + j] - mean) / std_dev); } }; template<typename xpu> void NormalizeImpl(const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs, const std::vector<OpReqType> &req, const NormalizeParam &param, const int length, const uint32_t channel, const int step = 0) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { DType* input = inputs[0].dptr<DType>(); DType* output = outputs[0].dptr<DType>(); for (uint32_t i = 0; i < channel; ++i) { DType mean = param.mean[param.mean.ndim() > i ? i : 0]; DType std_dev = param.std[param.std.ndim() > i ? 
i : 0]; mxnet_op::Kernel<normalize_forward<req_type>, xpu>::Launch( s, length, output, input, i, length, step, mean, std_dev); } }); }); } template<typename xpu> void NormalizeOpForward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed); // 3D input (c, h, w) if (inputs[0].ndim() == 3) { const int length = inputs[0].shape_[1] * inputs[0].shape_[2]; const uint32_t channel = inputs[0].shape_[0]; NormalizeImpl<xpu>(ctx, inputs, outputs, req, param, length, channel); } else if (inputs[0].ndim() == 4) { // 4D input (n, c, h, w) const int batch_size = inputs[0].shape_[0]; const int length = inputs[0].shape_[2] * inputs[0].shape_[3]; const uint32_t channel = inputs[0].shape_[1]; const int step = channel * length; #pragma omp parallel for for (auto n = 0; n < batch_size; ++n) { NormalizeImpl<xpu>(ctx, inputs, outputs, req, param, length, channel, n*step); } } } // Backward function template<int req> struct normalize_backward { template<typename DType> MSHADOW_XINLINE static void Map(int j, DType* in_grad, const DType* out_grad, const int i, const int length, const int step, const DType std_dev) { // d/dx{(x - mean) / std_dev} => (1 / std_dev) KERNEL_ASSIGN(in_grad[step + i*length + j], req, out_grad[step + i*length + j] * (1.0 / std_dev)); } }; template<typename xpu> void NormalizeBackwardImpl(const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs, const std::vector<OpReqType> &req, const NormalizeParam &param, const int length, const uint32_t channel, const int step = 0) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& out_grad = inputs[0]; const TBlob& in_grad = outputs[0]; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], 
req_type, { for (uint32_t i = 0; i < channel; ++i) { DType std_dev = param.std[param.std.ndim() > i ? i : 0]; mxnet_op::Kernel<normalize_backward<req_type>, xpu>::Launch( s, length, in_grad.dptr<DType>(), out_grad.dptr<DType>(), i, length, step, std_dev); } }); }); } template<typename xpu> void NormalizeOpBackward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed); // Note: inputs[0] is out_grad const TBlob& in_data = inputs[1]; // 3D input (c, h, w) if (in_data.ndim() == 3) { const int length = in_data.shape_[1] * in_data.shape_[2]; const uint32_t channel = in_data.shape_[0]; NormalizeBackwardImpl<xpu>(ctx, inputs, outputs, req, param, length, channel); } else if (in_data.ndim() == 4) { // 4D input (n, c, h, w) const int batch_size = in_data.shape_[0]; const int length = in_data.shape_[2] * in_data.shape_[3]; const uint32_t channel = in_data.shape_[1]; const int step = channel * length; #pragma omp parallel for for (auto n = 0; n < batch_size; ++n) { NormalizeBackwardImpl<xpu>(ctx, inputs, outputs, req, param, length, channel, n*step); } } } template<typename DType> inline DType saturate_cast(const float& src) { return static_cast<DType>(src); } template<> inline uint8_t saturate_cast(const float& src) { return std::min(std::max(src, 0.f), 255.f); } inline bool ImageShape(const nnvm::NodeAttrs& attrs, std::vector<TShape> *in_attrs, std::vector<TShape> *out_attrs) { TShape& dshape = (*in_attrs)[0]; CHECK_EQ(dshape.ndim(), 3) << "Input image must have shape (height, width, channels), but got " << dshape; auto nchannels = dshape[dshape.ndim()-1]; CHECK(nchannels == 3 || nchannels == 1) << "The last dimension of input image must be the channel dimension with " << "either 1 or 3 elements, but got input with 
shape " << dshape; SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<typename DType, int axis> void FlipImpl(const TShape &shape, DType *src, DType *dst) { int head = 1, mid = shape[axis], tail = 1; for (int i = 0; i < axis; ++i) head *= shape[i]; for (uint32_t i = axis+1; i < shape.ndim(); ++i) tail *= shape[i]; for (int i = 0; i < head; ++i) { for (int j = 0; j < (mid >> 1); ++j) { int idx1 = (i*mid + j) * tail; int idx2 = idx1 + (mid-(j << 1)-1) * tail; for (int k = 0; k < tail; ++k, ++idx1, ++idx2) { DType tmp = src[idx1]; dst[idx1] = src[idx2]; dst[idx2] = tmp; } } } } inline void FlipLeftRight(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); }); } inline void FlipTopBottom(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); }); } inline void RandomFlipLeftRight( const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { if (std::bernoulli_distribution()(prnd->GetRndEngine())) { if (outputs[0].dptr_ != inputs[0].dptr_) { std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType)); } } else { FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); } }); } inline void 
RandomFlipTopBottom( const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { if (std::bernoulli_distribution()(prnd->GetRndEngine())) { if (outputs[0].dptr_ != inputs[0].dptr_) { std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType)); } } else { FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); } }); } struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> { float min_factor; float max_factor; DMLC_DECLARE_PARAMETER(RandomEnhanceParam) { DMLC_DECLARE_FIELD(min_factor) .set_lower_bound(0.0) .describe("Minimum factor."); DMLC_DECLARE_FIELD(max_factor) .set_lower_bound(0.0) .describe("Maximum factor."); } }; inline void AdjustBrightnessImpl(const float& alpha_b, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; int length = inputs[0].Size(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); DType* input = inputs[0].dptr<DType>(); for (int l = 0; l < length; ++l) { float val = static_cast<float>(input[l]) * alpha_b; output[l] = saturate_cast<DType>(val); } }); } inline void RandomBrightness(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); float alpha_b = std::uniform_real_distribution<float>( param.min_factor, param.max_factor)(prnd->GetRndEngine()); 
AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs); } inline void AdjustContrastImpl(const float& alpha_c, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; static const float coef[] = { 0.299f, 0.587f, 0.114f }; int length = inputs[0].shape_[0] * inputs[0].shape_[1]; int nchannels = inputs[0].shape_[2]; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); DType* input = inputs[0].dptr<DType>(); float sum = 0.f; if (nchannels > 1) { for (int l = 0; l < length; ++l) { for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c]; } } else { for (int l = 0; l < length; ++l) sum += input[l]; } float gray_mean = sum / static_cast<float>(length); float beta = (1 - alpha_c) * gray_mean; for (int l = 0; l < length * nchannels; ++l) { float val = input[l] * alpha_c + beta; output[l] = saturate_cast<DType>(val); } }); } inline void RandomContrast(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); float alpha_c = std::uniform_real_distribution<float>( param.min_factor, param.max_factor)(prnd->GetRndEngine()); AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs); } inline void AdjustSaturationImpl(const float& alpha_s, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { static const float coef[] = { 0.299f, 0.587f, 0.114f }; int length = inputs[0].shape_[0] * inputs[0].shape_[1]; int nchannels = inputs[0].shape_[2]; float alpha_o = 1.f - alpha_s; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); 
DType* input = inputs[0].dptr<DType>(); if (nchannels == 1) { for (int l = 0; l < length; ++l) output[l] = input[l]; return; } for (int l = 0; l < length; ++l) { float gray = 0.f; for (int c = 0; c < 3; ++c) { gray = input[l*3 + c] * coef[c]; } gray *= alpha_o; for (int c = 0; c < 3; ++c) { float val = gray + input[l*3 + c] * alpha_s; output[l*3 + c] = saturate_cast<DType>(val); } } }); } inline void RandomSaturation(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); float alpha_s = std::uniform_real_distribution<float>( param.min_factor, param.max_factor)(prnd->GetRndEngine()); AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs); } inline void RGB2HLSConvert(const float& src_r, const float& src_g, const float& src_b, float *dst_h, float *dst_l, float *dst_s) { float b = src_b / 255.f, g = src_g / 255.f, r = src_r / 255.f; float h = 0.f, s = 0.f, l; float vmin; float vmax; float diff; vmax = vmin = r; vmax = std::fmax(vmax, g); vmax = std::fmax(vmax, b); vmin = std::fmin(vmin, g); vmin = std::fmin(vmin, b); diff = vmax - vmin; l = (vmax + vmin) * 0.5f; if (diff > std::numeric_limits<float>::epsilon()) { s = (l < 0.5f) * diff / (vmax + vmin); s += (l >= 0.5f) * diff / (2.0f - vmax - vmin); diff = 60.f / diff; h = (vmax == r) * (g - b) * diff; h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f); h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f); h += (h < 0.f) * 360.f; } *dst_h = h; *dst_l = l; *dst_s = s; } inline void HLS2RGBConvert(const float& src_h, const float& src_l, const float& src_s, float *dst_r, float *dst_g, float *dst_b) { static const int c_HlsSectorData[6][3] = { { 1, 3, 0 }, { 1, 0, 2 }, { 3, 0, 1 }, { 0, 2, 1 }, { 0, 1, 3 
}, { 2, 1, 0 } }; float h = src_h, l = src_l, s = src_s; float b = l, g = l, r = l; if (s != 0) { float p2 = (l <= 0.5f) * l * (1 + s); p2 += (l > 0.5f) * (l + s - l * s); float p1 = 2 * l - p2; h *= 1.f / 60.f; if (h < 0) { do { h += 6; } while (h < 0); } else if (h >= 6) { do { h -= 6; } while (h >= 6); } int sector = static_cast<int>(h); h -= sector; float tab[4]; tab[0] = p2; tab[1] = p1; tab[2] = p1 + (p2 - p1) * (1 - h); tab[3] = p1 + (p2 - p1) * h; b = tab[c_HlsSectorData[sector][0]]; g = tab[c_HlsSectorData[sector][1]]; r = tab[c_HlsSectorData[sector][2]]; } *dst_b = b * 255.f; *dst_g = g * 255.f; *dst_r = r * 255.f; } inline void AdjustHueImpl(float alpha, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { int length = inputs[0].shape_[0] * inputs[0].shape_[1]; if (inputs[0].shape_[2] == 1) return; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* input = inputs[0].dptr<DType>(); DType* output = outputs[0].dptr<DType>(); for (int i = 0; i < length; ++i) { float h, l, s; float r = static_cast<float>(*(input++)); float g = static_cast<float>(*(input++)); float b = static_cast<float>(*(input++)); RGB2HLSConvert(r, g, b, &h, &l, &s); h += alpha * 360.f; HLS2RGBConvert(h, l, s, &r, &g, &b); *(output++) = saturate_cast<DType>(r); *(output++) = saturate_cast<DType>(g); *(output++) = saturate_cast<DType>(b); } }); } inline void RandomHue(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); float alpha = std::uniform_real_distribution<float>( param.min_factor, param.max_factor)(prnd->GetRndEngine()); AdjustHueImpl(alpha, ctx, inputs, req, outputs); } struct 
RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> { float brightness; float contrast; float saturation; float hue; DMLC_DECLARE_PARAMETER(RandomColorJitterParam) { DMLC_DECLARE_FIELD(brightness) .describe("How much to jitter brightness."); DMLC_DECLARE_FIELD(contrast) .describe("How much to jitter contrast."); DMLC_DECLARE_FIELD(saturation) .describe("How much to jitter saturation."); DMLC_DECLARE_FIELD(hue) .describe("How much to jitter hue."); } }; inline void RandomColorJitter(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); int order[4] = {0, 1, 2, 3}; std::shuffle(order, order + 4, prnd->GetRndEngine()); bool flag = false; for (int i = 0; i < 4; ++i) { switch (order[i]) { case 0: if (param.brightness > 0) { float alpha_b = 1.0 + std::uniform_real_distribution<float>( -param.brightness, param.brightness)(prnd->GetRndEngine()); AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs); flag = true; } break; case 1: if (param.contrast > 0) { float alpha_c = 1.0 + std::uniform_real_distribution<float>( -param.contrast, param.contrast)(prnd->GetRndEngine()); AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs); flag = true; } break; case 2: if (param.saturation > 0) { float alpha_s = 1.f + std::uniform_real_distribution<float>( -param.saturation, param.saturation)(prnd->GetRndEngine()); AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs); flag = true; } break; case 3: if (param.hue > 0) { float alpha_h = std::uniform_real_distribution<float>( -param.hue, param.hue)(prnd->GetRndEngine()); AdjustHueImpl(alpha_h, ctx, flag ? 
outputs : inputs, req, outputs); flag = true; } break; } } } struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> { nnvm::Tuple<float> alpha; DMLC_DECLARE_PARAMETER(AdjustLightingParam) { DMLC_DECLARE_FIELD(alpha) .describe("The lighting alphas for the R, G, B channels."); } }; struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> { float alpha_std; DMLC_DECLARE_PARAMETER(RandomLightingParam) { DMLC_DECLARE_FIELD(alpha_std) .set_default(0.05) .describe("Level of the lighting noise."); } }; inline void AdjustLightingImpl(const nnvm::Tuple<float>& alpha, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { static const float eig[3][3] = { { 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 }, { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 }, { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 } }; int length = inputs[0].shape_[0] * inputs[0].shape_[1]; int channels = inputs[0].shape_[2]; if (channels == 1) return; float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2]; float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2]; float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2]; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); DType* input = inputs[0].dptr<DType>(); for (int i = 0; i < length; i++) { int base_ind = 3 * i; float in_r = static_cast<float>(input[base_ind]); float in_g = static_cast<float>(input[base_ind + 1]); float in_b = static_cast<float>(input[base_ind + 2]); output[base_ind] = saturate_cast<DType>(in_r + pca_r); output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g); output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b); } }); } inline void AdjustLighting(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> 
&outputs) { using namespace mshadow; const AdjustLightingParam &param = nnvm::get<AdjustLightingParam>(attrs.parsed); AdjustLightingImpl(param.alpha, ctx, inputs, req, outputs); } inline void RandomLighting(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomLightingParam &param = nnvm::get<RandomLightingParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); std::normal_distribution<float> dist(0, param.alpha_std); float alpha_r = dist(prnd->GetRndEngine()); float alpha_g = dist(prnd->GetRndEngine()); float alpha_b = dist(prnd->GetRndEngine()); AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs); } #define MXNET_REGISTER_IMAGE_AUG_OP(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(1) \ .set_num_outputs(1) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ImageShape) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \ .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \ .add_argument("data", "NDArray-or-Symbol", "The input.") #define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \ MXNET_REGISTER_IMAGE_AUG_OP(name) \ .set_attr<FResourceRequest>("FResourceRequest", \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \ }) } // namespace image } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
Example_tasking.1.c
/*
 * @@name: tasking.1c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 * @@version: omp_3.0
 */

/* A minimal binary tree node; payload is handled by process(). */
struct node {
  struct node *left;
  struct node *right;
};

extern void process(struct node *);

/* Traverse a binary tree, spawning each subtree visit as an OpenMP task.
   process(p) runs after the child tasks are *created*; there is no taskwait
   here, so the children may still be executing when the parent is processed
   (intentional in this example). Must be called from within a parallel
   region for the tasks to run concurrently. */
void traverse( struct node *p ) {
  if (p->left)
#pragma omp task   // p is firstprivate by default
    traverse(p->left);
  if (p->right)
#pragma omp task   // p is firstprivate by default
    traverse(p->right);
  process(p);
}
main.c
#include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <ctype.h> #include <assert.h> #include <math.h> //#include "ocl-kernel-gen.h" #include "parse-args.h" #include "sgtype.h" #include "sgbuf.h" #include "sgtime.h" #include "trace-util.h" #include "sp_alloc.h" #include "morton.h" #include "hilbert3d.h" #if defined( USE_OPENCL ) #include "../opencl/ocl-backend.h" #endif #if defined( USE_OPENMP ) #include <omp.h> #include "openmp/omp-backend.h" #include "openmp/openmp_kernels.h" #endif #if defined ( USE_CUDA ) #include <cuda.h> #include "cuda/cuda-backend.h" #endif #if defined( USE_SERIAL ) #include "serial/serial-kernels.h" #endif #if defined( USE_PAPI ) #include <papi.h> #include "papi_helper.h" #endif #define ALIGNMENT (4096) #define xstr(s) str(s) #define str(s) #s const char* SPATTER_VERSION="0.4"; //SGBench specific enums extern enum sg_backend backend; //Strings defining program behavior extern char platform_string[STRING_SIZE]; extern char device_string[STRING_SIZE]; extern char kernel_file[STRING_SIZE]; extern char kernel_name[STRING_SIZE]; extern int cuda_dev; extern int validate_flag; extern int quiet_flag; extern int aggregate_flag; extern int compress_flag; extern int papi_nevents; extern int stride_kernel; #ifdef USE_PAPI extern char papi_event_names[PAPI_MAX_COUNTERS][STRING_SIZE]; int papi_event_codes[PAPI_MAX_COUNTERS]; long long papi_event_values[PAPI_MAX_COUNTERS]; extern const char* const papi_ctr_str[]; #endif void print_papi_names() { #ifdef USE_PAPI printf("\nPAPI Counters: %d\n", papi_nevents); if (papi_nevents > 0) { printf("{ "); for (int i = 0; i < papi_nevents; i++) { printf("\"%s\":\"%s\"", papi_ctr_str[i], papi_event_names[i]); if (i != papi_nevents-1) { printf(",\n "); } } printf(" }\n"); } #endif } void print_system_info(){ printf("\nRunning Spatter version %s\n",SPATTER_VERSION); printf("Compiler: %s ver. 
%s\n", xstr(SPAT_C_NAME), xstr(SPAT_C_VER)); printf("Compiler Location: %s\n", xstr(SPAT_C)); //printf("Contributors: Patrick Lavin, Jeff Young, Aaron Vose\n"); printf("Backend: "); if(backend == OPENMP) printf("OPENMP\n"); if(backend == OPENCL) printf("OPENCL\n"); if(backend == CUDA) printf("CUDA\n"); printf("Aggregate Results? %s\n", aggregate_flag ? "YES" : "NO"); #ifdef USE_CUDA if (backend == CUDA) { struct cudaDeviceProp prop; cudaGetDeviceProperties(&prop, cuda_dev); printf("Device: %s\n", prop.name); } #endif print_papi_names(); printf("\n"); } void print_header(){ //printf("kernel op time source_size target_size idx_len bytes_moved actual_bandwidth omp_threads vector_len block_dim shmem\n"); printf("%-7s %-12s %-12s", "config", "time(s)","bw(MB/s)"); #ifdef USE_PAPI for (int i = 0; i < papi_nevents; i++) { printf(" %-12s", papi_ctr_str[i]); } #endif printf("\n"); } int compare (const void * a, const void * b) { if (*(double*)a > *(double*)b) return 1; else if (*(double*)a < *(double*)b) return -1; else return 0; } /** Time reported in seconds, sizes reported in bytes, bandwidth reported in mib/s" */ double report_time(int ii, double time, struct run_config rc, int idx){ size_t bytes_moved = 0; double actual_bandwidth = 0; bytes_moved = sizeof(sgData_t) * rc.pattern_len * rc.generic_len; actual_bandwidth = bytes_moved / time / 1000. 
/ 1000.; printf("%-7d %-12.4g %-12.6g", ii, time, actual_bandwidth); #ifdef USE_PAPI for (int i = 0; i < papi_nevents; i++) { printf(" %-12lld", rc.papi_ctr[idx][i]); } #endif printf("\n"); return actual_bandwidth; } void report_time2(struct run_config* rc, int nrc) { double *bw = (double*)malloc(sizeof(double)*nrc); assert(bw); for (int k = 0; k < nrc; k++) { if (aggregate_flag) { double min_time_ms = rc[k].time_ms[0]; int min_idx = 0; for (int i = 1; i < rc[k].nruns; i++) { if (rc[k].time_ms[i] < min_time_ms) { min_time_ms = rc[k].time_ms[i]; min_idx = i; } } bw[k] = report_time(k, min_time_ms/1000., rc[k], min_idx); } else { for (int i = 0; i < rc[k].nruns; i++) { report_time(k, rc[k].time_ms[i]/1000., rc[k], i); } } } if (aggregate_flag) { double min = bw[0]; double max = bw[0]; double hmean = 0; double first, med, third; qsort(bw, nrc, sizeof(double), compare); for (int i = 0; i < nrc; i++) { if (bw[i] < min) { min = bw[i]; } if (bw[i] > max) { max = bw[i]; } } first = bw[nrc/4]; med = bw[nrc/2]; third = bw[3*nrc/4]; // Harmonic mean for (int i = 0; i < nrc; i++) { hmean += 1./bw[i]; } hmean = 1./hmean * nrc; // Harmonic Standard Error // Reference: The Standard Errors of the Geometric and // Harmonic Means and Their Application to Index Numbers // Author: Nilan Norris // URL: https://www.jstor.org/stable/2235723 double E1_x = 0; for (int i = 0; i < nrc; i++) { E1_x += 1./bw[i]; } E1_x = E1_x / nrc; double theta_22 = pow(1./E1_x, 2); double sig_1x = 0; for (int i = 0; i < nrc; i++) { sig_1x += pow(1./bw[i] - E1_x,2); } sig_1x = sqrt(sig_1x / nrc); double hstderr = theta_22 * sig_1x / sqrt(nrc); printf("\n%-11s %-12s %-12s %-12s %-12s\n", "Min", "25%","Med","75%", "Max"); printf("%-12.6g %-12.6g %-12.6g %-12.6g %-12.6g\n", min, first, med, third, max); printf("%-12s %-12s\n", "H.Mean", "H.StdErr"); printf("%-12.6g %-12.6g\n", hmean, hstderr); /* printf("%.3lf\t%.3lf\n", hmean, stddev); */ } free(bw); } void print_data(double *buf, size_t len){ for (size_t i = 
0; i < len; i++){ printf("%.0lf ", buf[i]); } printf("\n"); } void print_sizet(size_t *buf, size_t len){ for (size_t i = 0; i < len; i++){ printf("%zu ", buf[i]); } printf("\n"); } void emit_configs(struct run_config *rc, int nconfigs); uint64_t isqrt(uint64_t x); uint64_t icbrt(uint64_t x); int main(int argc, char **argv) { // ======================================= // Declare Variables // ======================================= // source and target are used for the gather and scatter operations. // data is gathered from source and placed into target sgDataBuf source; sgDataBuf target; // OpenCL Specific #ifdef USE_OPENCL size_t global_work_size = 1; char *kernel_string; cl_uint work_dim = 1; #endif // ======================================= // Parse Command Line Arguments // ======================================= struct run_config *rc; int nrc = 0; parse_args(argc, argv, &nrc, &rc); if (nrc <= 0) { error("No run configurations parsed", ERROR); } // If indices span many pages, compress them so that there are no // pages in the address space which are never accessed // Pages are assumed to be 4KiB if (compress_flag) { for (int i = 0; i < nrc; i++) { compress_indices(rc[i].pattern, rc[i].pattern_len); } } struct run_config *rc2 = rc; // Allocate space for timing and papi counter information for (int i = 0; i < nrc; i++) { rc2[i].time_ms = (double*)malloc(sizeof(double) * rc2[i].nruns); #ifdef USE_PAPI rc2[i].papi_ctr = (long long **)malloc(sizeof(long long *) * rc2[i].nruns); for (int j = 0; j < rc2[i].nruns; j++){ rc2[i].papi_ctr[j] = (long long*)malloc(sizeof(long long) * papi_nevents); } #endif } // ======================================= // Initialize PAPI Library // ======================================= #ifdef USE_PAPI // Powering up a space shuttle probably has fewer checks than initlizing papi int err = PAPI_library_init(PAPI_VER_CURRENT); if (err !=PAPI_VER_CURRENT && err > 0) { error ("PAPI library version mismatch", ERROR); } if (err < 0) papi_err(err, 
__LINE__, __FILE__); err = PAPI_is_initialized(); if (err != PAPI_LOW_LEVEL_INITED) { error ("PAPI was not initialized", ERROR); } // OK, now that papi is finally inizlized, we need to make our EventSet // First, convert names to codes for (int i = 0; i < papi_nevents; i++) { papi_err(PAPI_event_name_to_code(papi_event_names[i],&papi_event_codes[i]), __LINE__, __FILE__); } int EventSet = PAPI_NULL; papi_err(PAPI_create_eventset(&EventSet), __LINE__, __FILE__); for (int i = 0; i < papi_nevents; i++) { papi_err(PAPI_add_event(EventSet, papi_event_codes[i]), __LINE__, __FILE__); } #endif // ======================================= // Initialize OpenCL Backend // ======================================= /* Create a context and corresponding queue */ #ifdef USE_OPENCL if (backend == OPENCL) { initialize_dev_ocl(platform_string, device_string); } #endif // ======================================= // Compute Buffer Sizes // ======================================= if (rc2[0].kernel != GATHER && rc2[0].kernel != SCATTER) { printf("Error: Unsupported kernel\n"); exit(1); } size_t max_source_size = 0; size_t max_target_size = 0; size_t max_pat_len = 0; size_t max_ptrs = 0; size_t max_ro_len = 0; for (int i = 0; i < nrc; i++) { size_t max_pattern_val = rc2[i].pattern[0]; for (size_t j = 0; j < rc2[i].pattern_len; j++) { if (rc2[i].pattern[j] > max_pattern_val) { max_pattern_val = rc2[i].pattern[j]; } } //printf("count: %zu, delta: %zu, %zu\n", rc2[i].generic_len, rc2[i].delta, rc2[i].generic_len*rc2[i].delta); size_t cur_source_size = ((max_pattern_val + 1) + (rc2[i].generic_len-1)*rc2[i].delta) * sizeof(sgData_t); //printf("max_pattern_val: %zu, source_size %zu\n", max_pattern_val, cur_source_size); if (cur_source_size > max_source_size) { max_source_size = cur_source_size; } size_t cur_target_size = rc2[i].pattern_len * sizeof(sgData_t) * rc2[i].wrap; if (cur_target_size > max_target_size) { max_target_size = cur_target_size; } if (rc2[i].omp_threads > max_ptrs) { max_ptrs = 
rc2[i].omp_threads; } if (rc2[i].pattern_len > max_pat_len) { max_pat_len = rc2[i].pattern_len; } if (rc2[i].ro_morton == 1) { rc2[i].ro_order = z_order_1d(rc2[i].generic_len, rc2[i].ro_block); } else if (rc2[i].ro_morton == 2) { rc2[i].ro_order = z_order_2d(isqrt(rc2[i].generic_len), rc2[i].ro_block); } else if (rc2[i].ro_morton == 3) { rc2[i].ro_order = z_order_3d(icbrt(rc2[i].generic_len), rc2[i].ro_block); } if (rc2[i].ro_hilbert == 1) { //yes, use z order function rc2[i].ro_order = z_order_1d(rc2[i].generic_len, rc2[i].ro_block); } else if (rc2[i].ro_hilbert == 2) { error ("Not yet implemented", ERROR); } else if (rc2[i].ro_hilbert == 3) { rc2[i].ro_order = h_order_3d(icbrt(rc2[i].generic_len), rc2[i].ro_block); } if ((rc2[i].ro_hilbert || rc2[i].ro_morton) && !rc2[i].ro_order) { error("Unable to generate reorder pattern.", ERROR); } if (rc2[i].ro_morton || rc[i].ro_morton) { if (rc2[i].generic_len > max_ro_len) { max_ro_len = rc2[i].generic_len; } } } source.size = max_source_size; source.len = source.size / sizeof(sgData_t); target.size = max_target_size; target.len = target.size / sizeof(sgData_t); target.nptrs = max_ptrs; // ======================================= // Create OpenCL Kernel // ======================================= #ifdef USE_OPENCL if (backend == OPENCL) { //kernel_string = ocl_kernel_gen(index_len, vector_len, kernel); kernel_string = read_file(kernel_file); sgp = kernel_from_string(context, kernel_string, kernel_name, NULL); if (kernel_string) { free(kernel_string); } } #endif // ======================================= // Create Host Buffers, Fill With Data // ======================================= source.host_ptr = (sgData_t*) sp_malloc(source.size, 1, ALIGN_CACHE); // replicate the target space for every thread target.host_ptrs = (sgData_t**) sp_malloc(sizeof(sgData_t*), target.nptrs, ALIGN_CACHE); for (size_t i = 0; i < target.nptrs; i++) { target.host_ptrs[i] = (sgData_t*) sp_malloc(target.size, 1, ALIGN_PAGE); #ifdef VALIDATE if 
(validate_flag) { // Fill target buffer with data for validation purposes random_data(target.host_ptrs[i], target.len); } #endif } // printf("-- here -- \n"); // Populate buffers on host #pragma omp parallel for for (int i = 0; i < source.len; i++) { source.host_ptr[i] = i % (source.len / 64); } random_data(source.host_ptr, source.len); // ======================================= // Create Device Buffers, Transfer Data // ======================================= #ifdef USE_OPENCL if (backend == OPENCL) { //TODO: Rewrite to not take index buffers //create_dev_buffers_ocl(&source, &target, &si, &ti); } #endif #ifdef USE_CUDA sgIdx_t *pat_dev; uint32_t *order_dev; if (backend == CUDA) { //TODO: Rewrite to not take index buffers create_dev_buffers_cuda(&source); cudaMalloc((void**)&pat_dev, sizeof(sgIdx_t) * max_pat_len); cudaMalloc((void**)&order_dev, sizeof(uint32_t) * max_ro_len); cudaMemcpy(source.dev_ptr_cuda, source.host_ptr, source.size, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); } int final_block_idx = -1; int final_thread_idx = -1; double final_gather_data = -1; #endif // ======================================= // Execute Benchmark // ======================================= // Print some header info /* if (print_header_flag) { print_system_info(); emit_configs(rc2, nrc); print_header(); } */ if (quiet_flag < 1) { print_system_info(); } if (quiet_flag < 2) { emit_configs(rc2, nrc); } if (quiet_flag < 3) { print_header(); } // Print config info for (int k = 0; k < nrc; k++) { // Time OpenCL Kernel #ifdef USE_OPENCL if (backend == OPENCL) { } #endif // USE_OPENCL // Time CUDA Kernel #ifdef USE_CUDA int wpt = 1; if (backend == CUDA) { float time_ms = 2; for (int i = -10; i < (int)rc2[k].nruns; i++) { #define arr_len (1) unsigned long global_work_size = rc2[k].generic_len / wpt * rc2[k].pattern_len; unsigned long local_work_size = rc2[k].local_work_size; unsigned long grid[arr_len] = {global_work_size/local_work_size}; unsigned long block[arr_len] = 
{local_work_size}; if (rc2[k].random_seed == 0) { time_ms = cuda_block_wrapper(arr_len, grid, block, rc2[k].kernel, source.dev_ptr_cuda, pat_dev, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, wpt, rc2[k].ro_morton, rc2[k].ro_order, order_dev, rc[k].stride_kernel, &final_block_idx, &final_thread_idx, &final_gather_data, validate_flag); } else { time_ms = cuda_block_random_wrapper(arr_len, grid, block, rc2[k].kernel, source.dev_ptr_cuda, pat_dev, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, wpt, rc2[k].random_seed); } if (i>=0) rc2[k].time_ms[i] = time_ms; } } #endif // USE_CUDA // Time OpenMP Kernel #ifdef USE_OPENMP if (backend == OPENMP) { omp_set_num_threads(rc2[k].omp_threads); // Start at -1 to do a cache warm for (int i = -1; i < (int)rc2[k].nruns; i++) { if (i!=-1) sg_zero_time(); #ifdef USE_PAPI if (i!=-1) profile_start(EventSet, __LINE__, __FILE__); #endif switch (rc2[k].kernel) { case SG: if (rc2[k].op == OP_COPY) { //sg_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr,index_len); } else { //sg_accum_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr, index_len); } break; case SCATTER: if (rc2[k].random_seed >= 1) { scatter_smallbuf_random(source.host_ptr, target.host_ptrs, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, rc2[k].random_seed); } else if (rc2[k].op == OP_COPY) { scatter_smallbuf(source.host_ptr, target.host_ptrs, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap); // scatter_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr, index_len); } else { // scatter_accum_omp (target.host_ptr, ti.host_ptr, source.host_ptr, si.host_ptr, index_len); } break; case GATHER: if (rc2[k].random_seed >= 1) { gather_smallbuf_random(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, rc2[k].random_seed); } else if 
(rc2[k].deltas_len <= 1) { if (rc2[k].ro_morton || rc2[k].ro_hilbert) { gather_smallbuf_morton(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap, rc2[k].ro_order); } else { gather_smallbuf(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap); } } else { gather_smallbuf_multidelta(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].deltas_ps, rc2[k].generic_len, rc2[k].wrap, rc2[k].deltas_len); } break; default: printf("Error: Unable to determine kernel\n"); break; } #ifdef USE_PAPI if (i!= -1) profile_stop(EventSet, rc2[k].papi_ctr[i], __LINE__, __FILE__); #endif if (i!= -1) rc2[k].time_ms[i] = sg_get_time_ms(); } //report_time2(rc2, nrc); } #endif // USE_OPENMP // Time Serial Kernel #ifdef USE_SERIAL if (backend == SERIAL) { for (int i = 0; i <= rc2[k].nruns; i++) { if (i!=-1) sg_zero_time(); #ifdef USE_PAPI if (i!=-1) profile_start(EventSet, __LINE__, __FILE__); #endif //TODO: Rewrite serial kernel switch (rc2[k].kernel) { case SCATTER: scatter_smallbuf_serial(source.host_ptr, target.host_ptrs, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap); break; case GATHER: gather_smallbuf_serial(target.host_ptrs, source.host_ptr, rc2[k].pattern, rc2[k].pattern_len, rc2[k].delta, rc2[k].generic_len, rc2[k].wrap); break; default: printf("Error: Unable to determine kernel\n"); break; } //double time_ms = sg_get_time_ms(); //if (i!=0) report_time(k, time_ms/1000., rc2[k], i); #ifdef USE_PAPI if (i!= -1) profile_stop(EventSet, rc2[k].papi_ctr[i], __LINE__, __FILE__); #endif if (i!= -1) rc2[k].time_ms[i] = sg_get_time_ms(); } } #endif // USE_SERIAL } report_time2(rc2, nrc); #ifdef USE_CUDA cudaMemcpy(source.host_ptr, source.dev_ptr_cuda, source.size, cudaMemcpyDeviceToHost); #endif int good = 0; int bad = 0; for (int i = 0; i < source.len; i++) { if (source.host_ptr[i] == 1337.) 
{ good++; }else { bad++; } } //printf("\ngood: %d, bad: %d\n", good, bad); // ======================================= // Validation // ======================================= #ifdef VALIDATE if(validate_flag) { // Validate that the last item written to buffer is actually there // Supported kernels for this last write validation are: // // OPENMP: // scatter_smallbuf // gather_smallbuf // gather_smallbuf_morton // // CUDA: // scatter_block // gather_block // gather_block_morton // gather_block_stride #ifdef USE_OPENMP if (backend == OPENMP) { // use the last run config struct run_config *rc_final = rc2 + (nrc - 1); if (rc_final->op == OP_COPY) { //accum kernel validation currently not supported char is_written_data_missing = 1; // sgData_t *source_data_ptr = source.host_ptr + rc_final->delta * (rc_final->generic_len - 1); if (rc_final->ro_morton || rc_final->ro_hilbert) { source_data_ptr = source.host_ptr + rc_final->delta * rc_final->ro_order[rc_final->generic_len - 1]; } else { source_data_ptr = source.host_ptr + rc_final->delta * (rc_final->generic_len - 1); } // we don't know which thread wrote the data, so check all targets for (int t = 0; t < rc_final->omp_threads; t++) { sgData_t *target_data_ptr = target.host_ptrs[t] + rc_final->pattern_len * ((rc_final->generic_len - 1) % rc_final->wrap); //check that all data in the pattern is written char matches_pattern = 1; for (int d = 0; d < rc_final->pattern_len; d++) { if (source_data_ptr[rc_final->pattern[d]] != target_data_ptr[d]) { matches_pattern = 0; break; } } if (matches_pattern) { is_written_data_missing = 0; break; } } if (is_written_data_missing) { printf("VALIDATION ERROR: The data that was supposed to be last written to buffer is missing\n"); } } } #endif #ifdef USE_CUDA if (backend == CUDA) { char is_written_data_missing = 1; struct run_config *rc_final = rc2 + (nrc - 1); size_t V = rc_final->pattern_len; double src = (source.host_ptr + (final_block_idx * (rc_final->local_work_size / V) + 
final_thread_idx / V) * rc_final->delta)[rc_final->pattern[final_thread_idx % V]]; if (rc_final->kernel == SCATTER) { is_written_data_missing = src != rc_final->pattern[final_thread_idx % V]; } else if (rc_final->kernel == GATHER) { if (rc_final->ro_morton) { src = (source.host_ptr + (final_block_idx * (rc_final->local_work_size / V) + rc_final->ro_order[final_thread_idx / V]) * rc_final->delta)[rc_final->pattern[final_thread_idx % V]]; } else if (rc_final->stride_kernel >= 0) { src = (source.host_ptr + (final_block_idx * (rc_final->local_work_size / V) + final_thread_idx / V) * rc_final->delta)[rc_final->pattern[rc_final->stride_kernel * (final_thread_idx % V)]]; } is_written_data_missing = src != final_gather_data; } if (is_written_data_missing) { printf("VALIDATION ERROR: The data that was supposed to be last written to buffer is missing\n"); } } #endif } #endif // Free Memory free(source.host_ptr); for (size_t i = 0; i < target.nptrs; i++) { free(target.host_ptrs[i]); } if (target.nptrs != 0) { free(target.host_ptrs); } for (int i = 0; i < nrc; i++) { if (rc2[i].ro_order) { free(rc2[i].ro_order); } free(rc2[i].time_ms); #ifdef USE_PAPI for (int j = 0; j < rc2[i].nruns; j++){ free(rc2[i].papi_ctr[j]); } free(rc2[i].papi_ctr); #endif } free(rc); //printf("Mem used: %lld MiB\n", get_mem_used()/1024/1024); } void emit_configs(struct run_config *rc, int nconfigs) { printf("Run Configurations\n"); printf("[ "); for (int i = 0; i < nconfigs; i++) { if (i != 0) { printf(" "); } printf("{"); // Pattern Type printf("\'name\':\'%s\', ", rc[i].name); // Kernel switch (rc[i].kernel) { case GATHER: printf("\'kernel\':\'Gather\', "); break; case SCATTER: printf("\'kernel\':\'Scatter\', "); break; case SG: printf("\'kernel\':\'GS\', "); break; case INVALID_KERNEL: error ("Invalid kernel sent to emit_configs", ERROR); break; } // Pattern printf("\'pattern\':["); for (int j = 0; j < rc[i].pattern_len; j++) { printf("%zu", rc[i].pattern[j]); if (j != rc[i].pattern_len-1) { 
printf(","); } } printf("], "); //Delta //TODO: multidelta if (rc[i].deltas_len == 1) { printf("\'delta\':%zd", rc[i].delta); } else { printf("\'deltas\':["); for (int j = 0; j < rc[i].deltas_len; j++) { printf("%zu", rc[i].deltas[j]); if (j != rc[i].deltas_len-1) { printf(","); } } printf("]"); } printf(", "); // Len printf("\'length\':%zu, ", rc[i].generic_len); if (rc[i].random_seed > 0) { printf("\'seed\':%zu, ", rc[i].random_seed); } // Aggregate if (aggregate_flag) { printf("\'agg\':%zu, ", rc[i].nruns); } // Wrap if (aggregate_flag) { printf("\'wrap\':%zu, ", rc[i].wrap); } // OpenMP Threads if (backend == OPENMP) { printf("\'threads\':%zu", rc[i].omp_threads); } // OpenMP Threads if (rc[i].stride_kernel!=-1) { printf("\'stride_kernel\':%d", rc[i].stride_kernel); } // Morton if (rc[i].ro_morton) { printf(", \'morton\':%d", rc[i].ro_morton); } // Morton if (rc[i].ro_hilbert) { printf(", \'hilbert\':%d", rc[i].ro_hilbert); } if (rc[i].ro_morton || rc[i].ro_hilbert) { printf(", \'roblock\':%d", rc[i].ro_block); } printf("}"); if (i != nconfigs-1) { printf(",\n"); } } printf(" ]\n\n"); } // From http://www.codecodex.com/wiki/Calculate_an_integer_square_root uint64_t isqrt(uint64_t x) { uint64_t op, res, one; op = x; res = 0; /* "one" starts at the highest power of four <= than the argument. */ one = 1 << 30; /* second-to-top bit set */ while (one > op) one >>= 2; while (one != 0) { if (op >= res + one) { op -= res + one; res += one << 1; // <-- faster than 2 * one } res >>= 1; one >>= 2; } return res; } // From https://gist.github.com/anonymous/729557 uint64_t icbrt(uint64_t x) { int s; uint64_t y; uint64_t b; y = 0; for (s = 63; s >= 0; s -= 3) { y += y; b = 3*y*((uint64_t) y + 1) + 1; if ((x >> s) >= b) { x -= b << s; y++; } } return y; }
serial_teams.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt, multicpu
// UNSUPPORTED: gcc, icc-19

// Lit test for the OMPT callback sequence of a `teams` construct with two
// teams, each running a single-threaded ("serial") parallel region.
// The output is grouped per thread by %sort-threads before FileCheck runs,
// which is why the expectations below are organized per master thread.
// NOTE: the CHECK/CHECK-SAME/CHECK-NOT comment lines are the test oracle
// consumed by FileCheck — do not reword or reformat them.

#include "callback.h"

int main() {
#pragma omp target teams num_teams(2) thread_limit(1)
#pragma omp parallel num_threads(1)
  {
    printf("In teams parallel\n");
  }
  return 0;
}

// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null

// CHECK: {{^}}[[MASTER_0:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1
// CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_begin:
// CHECK-SAME: parent_task_id=[[INIT_TASK]]
// CHECK-SAME: {{.*}} requested_num_teams=2
// CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]]
//
// team 0
//
// initial task in the teams construct
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=2, index=0
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]]
// user parallel region
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00:[0-9]+]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0
// CHECK: {{^}}[[MASTER_0]]: ompt_event_teams_end:
// CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]]
// CHECK: {{^}}[[MASTER_0]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1
//
// team 1
//
// initial task in the teams construct
// CHECK: {{^}}[[MASTER_1:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_1:[0-9]+]], actual_parallelism=2, index=1
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_1]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1:[0-9]+]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[IMPL_TASK_1:[0-9]+]]
// user parallel region
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_1]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11:[0-9]+]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_11]], task_id=[[IMPL_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_ID_1]], task_id=[[INIT_TASK_1]]
// CHECK: {{^}}[[MASTER_1]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_1]], actual_parallelism=0, index=1
coin_flip_omp.c
/*
 * This program is the first of three exercises in the OnRamp to
 * Parallel Computing - Monte Carlo Module. We will flip a coin,
 * simulated using rand_r(), many times and evaluate the randomness of
 * the results using a chi-squared test. This exercise is derived from
 * Libby Shoop's CS in Parallel Monte Carlo Module.
 *
 * History:
 *  Dave Valentine (Slippery Rock University): Original C++ program
 *  Libby Shoop    (Macalester University)   : Adapted for CS in
 *                                             Parallel Module
 *  Justin Ragatz  (UW-La Crosse)            : Adapted for OnRamp Module
 *                                             rewritten in C.
 */
#include "coin_flip_omp.h"

/*
 * Run coin-flip trials of doubling size, timing each trial and printing
 * a chi-squared measure of how fair the simulated coin is.
 *
 * argv[1] (optional): OpenMP thread count, clamped to [1, 32] — atoi()
 * returns 0 on garbage and num_threads() requires a positive value.
 *
 * Returns 0 on success.
 */
int main(int argc, char *argv[])
{
	unsigned long long num_flips = 0;
	unsigned long long num_heads = 0;
	unsigned long long num_tails = 0;
	int n_threads = 1;
	int tid;
	unsigned long long trial_flips = FLIPS_PER_TRIAL;
	unsigned long long max_flips = FLIPS_PER_TRIAL * (1LLU << TRIALS);
	double start_time = -1;
	double end_time = -1;

	/* Get number of threads. */
	if (argc > 1) {
		n_threads = atoi(argv[1]);
		if (n_threads > 32) {
			n_threads = 32;
		}
		if (n_threads < 1) {
			/* atoi() yields 0 for non-numeric input; also rejects
			 * negative requests. */
			n_threads = 1;
		}
	}

	create_strings(); /* Malloc and initialize strings. */

	/* Print introduction.  Cast the macros so the %llu conversion is
	 * valid no matter how TRIALS / FLIPS_PER_TRIAL are defined. */
	printf("\n Settings: \n");
	printf(" Trials : %llu\n", (unsigned long long)TRIALS);
	printf(" Flips per trial: %llu\n", (unsigned long long)FLIPS_PER_TRIAL);
	printf(" Threads : %d\n", n_threads);
	printf("\n Begin Simulation... \n");

	/* Print table heading. */
	printf("\n ----------------------------------------"
	       "----------------------------------------\n");
	printf(" | %15s | %15s | %15s | %11s | %8s |\n",
	       "Trials", "Heads", "Tails", "Chi Squared", "Time");
	printf(" ----------------------------------------"
	       "----------------------------------------\n");

	/* Run the simulation, doubling the flip count each trial. */
	while (trial_flips <= max_flips) {
		num_heads = 0;
		num_tails = 0;

		start_time = omp_get_wtime();

#pragma omp parallel num_threads(n_threads) default(none) \
	private(num_flips, tid) shared(trial_flips, seeds) \
	reduction(+:num_heads, num_tails)
		{
			tid = omp_get_thread_num();

			/* Per-thread seed for rand_r(); the % 104729 keeps the
			 * magnitude small enough that abs() cannot overflow. */
			seeds[tid] = abs( ( (time(NULL) * 181) * ( (tid - 83) * 359 ) ) % 104729 );

#pragma omp for
			for (num_flips = 0; num_flips < trial_flips; num_flips++) {
				if (rand_r(&seeds[tid]) % 2 == 0) {
					num_heads++;
				} else {
					num_tails++;
				}
			}
		}

		end_time = omp_get_wtime();

		pretty_int(trial_flips, trial_string);
		pretty_int(num_heads,   heads_string);
		pretty_int(num_tails,   tails_string);

		printf(" | %15s | %15s | %15s | %11.2f | %8.2f |\n",
		       trial_string, heads_string, tails_string,
		       chi_squared(num_heads, num_tails),
		       (double)(end_time - start_time));

		trial_flips *= 2;
	}

	printf(" ----------------------------------------"
	       "----------------------------------------\n");

	clean_exit(0);

	return 0;
}

/*
 * Chi-squared statistic for the fair-coin hypothesis: values near 0
 * indicate the observed heads/tails split is close to 50/50.
 */
double chi_squared(unsigned long long heads, unsigned long long tails)
{
	double sum = 0;              /* chi square sum */
	double tot = heads + tails;  /* total flips */
	double expected = 0.5 * tot; /* expected heads (or tails) */

	sum = ((heads - expected) * (heads - expected) / expected) +
	      ((tails - expected) * (tails - expected) / expected);

	return sum;
}

/*
 * Format n with thousands separators into s (e.g. 1234567 -> "1,234,567").
 * s must point to at least STRING_LEN bytes.
 *
 * Returns 0 on success, -1 if s is NULL or the comma-expanded number does
 * not fit in STRING_LEN bytes.
 *
 * Fix vs. the original: the old code called sprintf() and checked the
 * length afterwards, i.e. only after any buffer overflow had already
 * happened, and it never accounted for the extra comma characters it
 * writes past the digits.  snprintf() plus an up-front size check makes
 * the function safe for any n.
 */
int pretty_int(unsigned long long n, char* s)
{
	int extra  = 0;
	int commas = 0;
	int count  = 0;
	int len    = 0;
	int i;

	if (NULL == s) return -1;

	/* snprintf() cannot overrun the buffer; a return >= STRING_LEN
	 * means the digits alone would not fit. */
	len = snprintf(s, STRING_LEN, "%llu", n);
	if (len < 0 || len >= STRING_LEN) {
		printf("Buffer overflow, cannot print string.\n");
		return -1;
	}

	extra  = len % 3;
	commas = (len - extra) / 3;
	if (0 == extra) commas--;

	/* The expanded string needs len + commas characters plus '\0'. */
	if (len + commas + 1 > STRING_LEN) {
		printf("Buffer overflow, cannot print string.\n");
		return -1;
	}

	s[len + commas] = '\0';

	/* Walk backwards, shifting digits right and inserting a comma
	 * after every third digit. */
	for (i = len - 1; i > 0; i--) {
		count++;
		count = count % 3;
		if (0 == count) {
			s[i + commas] = s[i];
			commas--;
			s[i + commas] = ',';
		} else {
			s[i + commas] = s[i];
		}
	}

	return 0;
}

/*
 * Allocate the three global output strings and blank them with spaces
 * (NUL-terminated) so untouched positions print cleanly.  Exits via
 * clean_exit(-1) on allocation failure.  Returns 0 on success.
 */
int create_strings()
{
	int i;

	trial_string = (char*) malloc (sizeof(char) * STRING_LEN);
	if (NULL == trial_string) {
		fprintf(stderr, "Error: Malloc for trial_string failed.\n");
		clean_exit(-1);
	}

	heads_string = (char*) malloc (sizeof(char) * STRING_LEN);
	if (NULL == heads_string) {
		fprintf(stderr, "Error: Malloc for heads_string failed.\n");
		clean_exit(-1);
	}

	tails_string = (char*) malloc (sizeof(char) * STRING_LEN);
	if (NULL == tails_string) {
		fprintf(stderr, "Error: Malloc for tails_string failed.\n");
		clean_exit(-1);
	}

	for (i = 0; i < STRING_LEN - 1; i++) {
		trial_string[i] = ' ';
		heads_string[i] = ' ';
		tails_string[i] = ' ';
	}
	trial_string[STRING_LEN - 1] = '\0';
	heads_string[STRING_LEN - 1] = '\0';
	tails_string[STRING_LEN - 1] = '\0';

	return 0;
}

/*
 * Free the global strings and report termination status.  A nonzero
 * status terminates the process with EXIT_FAILURE; status 0 returns
 * normally after printing a confirmation.
 */
int clean_exit(int status)
{
	free(trial_string);
	free(heads_string);
	free(tails_string);
	trial_string = NULL;
	heads_string = NULL;
	tails_string = NULL;

	if (status == 0) {
		printf("\n Normal termination.\n\n");
	} else {
		fprintf(stderr, "\n Terminated by error.\n\n");
		exit(EXIT_FAILURE);
	}

	return 0;
}
pooling_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 max pooling with stride 2: each output element is the maximum of a
// 3x3 window of the input, windows advancing by 2 in both directions.
// Channels are processed in parallel with OpenMP; within each output row
// the ARM NEON paths produce four outputs per iteration, with a scalar
// tail loop handling the remainder (and the whole row on non-NEON builds).
// NOTE(review): assumes top_blob is already allocated to outw x outh x inch
// by the caller — confirm against the calling Pooling layer.
static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Each output row consumes 2*outw input columns from each source row and
    // then the window moves down two input rows.  After the row loop the
    // pointers have advanced by 2*outw, so tailstep (= 2*w - 2*outw) carries
    // them to the start of the next row pair.
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // The three consecutive input rows covered by the 3x3 window.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            // nn = number of 4-output NEON iterations; remain = scalar tail.
            int nn = outw >> 2;
            int remain = outw - (nn << 2);
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            // AArch64 path.  ld2 de-interleaves each row into even columns
            // (v0/v2/v4) and odd columns (v1/v3/v5); fmax merges the pairs,
            // ext shifts in the first even lane of the next group to supply
            // the third column of each 3-wide window, and st1 stores four
            // pooled outputs per loop.  The trailing subs rewind the row
            // pointers by the one group that was pre-loaded past the end.
            if (nn > 0)
            {
            asm volatile(
                "prfm pldl1keep, [%1, #256] \n"
                "ld2 {v0.4s, v1.4s}, [%1], #32 \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld2 {v4.4s, v5.4s}, [%3], #32 \n"
                "0: \n"
                "prfm pldl1keep, [%1, #256] \n"
                "ld2 {v6.4s, v7.4s}, [%1], #32 \n"
                "fmax v12.4s, v0.4s, v1.4s \n"
                "fmax v13.4s, v2.4s, v3.4s \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v8.4s, v9.4s}, [%2], #32 \n"
                "fmax v14.4s, v4.4s, v5.4s \n"
                "ext v0.16b, v0.16b, v6.16b, #4 \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld2 {v10.4s, v11.4s}, [%3], #32 \n"
                "ext v2.16b, v2.16b, v8.16b, #4 \n"
                "fmax v12.4s, v12.4s, v0.4s \n"
                "ext v4.16b, v4.16b, v10.16b, #4 \n"
                "fmax v13.4s, v13.4s, v2.4s \n"
                "fmax v14.4s, v14.4s, v4.4s \n"
                "fmax v12.4s, v12.4s, v13.4s \n"
                "orr v0.16b, v6.16b, v6.16b \n"
                "orr v1.16b, v7.16b, v7.16b \n"
                "fmax v12.4s, v12.4s, v14.4s \n"
                "orr v2.16b, v8.16b, v8.16b \n"
                "orr v3.16b, v9.16b, v9.16b \n"
                "orr v4.16b, v10.16b, v10.16b \n"
                "orr v5.16b, v11.16b, v11.16b \n"
                "subs %w0, %w0, #1 \n"
                "st1 {v12.4s}, [%4], #16 \n"
                "bne 0b \n"
                "sub %1, %1, #32 \n"
                "sub %2, %2, #32 \n"
                "sub %3, %3, #32 \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(r2),     // %3
                  "=r"(outptr)  // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14"
            );
            }
#else
            // ARMv7 path: same scheme with vld2/vmax/vext.  q0..q5 hold the
            // de-interleaved even/odd columns of the three rows, q12 ends up
            // holding the four pooled maxima stored by vst1.
            if (nn > 0)
            {
            asm volatile(
                "pld [%1, #256] \n"
                "vld2.f32 {d0-d3}, [%1]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
                "pld [%2, #256] \n"
                "vld2.f32 {d4-d7}, [%2]! \n"
                "pld [%3, #256] \n"
                "vld2.f32 {d8-d11}, [%3]! \n"
                "0: \n"
                "pld [%1, #256] \n"
                "vld2.f32 {d12-d15}, [%1]! \n"// q6 = 8 10 12 14 q7 = 9 11 13 15
                "vmax.f32 q12, q0, q1 \n"
                "vmax.f32 q13, q2, q3 \n"
                "pld [%2, #256] \n"
                "vld2.f32 {d16-d19}, [%2]! \n"
                "vmax.f32 q14, q4, q5 \n"
                "vext.32 q0, q0, q6, #1 \n"
                "pld [%3, #256] \n"
                "vld2.f32 {d20-d23}, [%3]! \n"
                "vext.32 q2, q2, q8, #1 \n"
                "vmax.f32 q12, q12, q0 \n"
                "vext.32 q4, q4, q10, #1 \n"
                "vmax.f32 q13, q13, q2 \n"
                "vmax.f32 q14, q14, q4 \n"
                "vmax.f32 q12, q12, q13 \n"
                "vorr q0, q6, q6 \n"
                "vorr q1, q7, q7 \n"
                "vmax.f32 q12, q12, q14 \n"
                "vorr q2, q8, q8 \n"
                "vorr q3, q9, q9 \n"
                "vorr q4, q10, q10 \n"
                "vorr q5, q11, q11 \n"
                "subs %0, #1 \n"
                "vst1.f32 {d24-d25}, [%4]! \n"
                "bne 0b \n"
                "sub %1, #32 \n"
                "sub %2, #32 \n"
                "sub %3, #32 \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(r2),     // %3
                  "=r"(outptr)  // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: plain 3x3 max over the three rows, stride 2.
            for (; remain>0; remain--)
            {
                float max0 = std::max(std::max(r0[0], r0[1]), r0[2]);
                float max1 = std::max(std::max(r1[0], r1[1]), r1[2]);
                float max2 = std::max(std::max(r2[0], r2[1]), r2[2]);

                *outptr = std::max(std::max(max0, max1), max2);

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            // Advance to the start of the next pair of input rows.
            r0 += tailstep;//1 + w;
            r1 += tailstep;//1 + w;
            r2 += tailstep;//1 + w;
        }
    }
}
oneWayFunction.c
/* Copyright 2016-2018 The Pop Core Foundation */

#include "oneWayFunction.h"

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
// #include <omp.h>

#include "my_time.h"
#include "common.h"

// OpenSSL Library
#include "c_sha1.h"
#include "c_sha256.h"
#include "c_sha512.h"
#include "c_sha3_256.h"
#include "c_whirlpool.h"
#include "c_ripemd160.h"
#include "c_blake2s256.h"
#include "c_aes128.h"
#include "c_des.h"
#include "c_crc32.h"
#include "c_hmac_md5.h"
#include "c_rc4.h"
#include "c_camellia128.h"

// JTR source code
#include "c_gost.h"
#include "c_haval5_256.h"
#include "c_skein512_256.h"

/*
 * Registry of the one-way primitives exercised by this benchmark.  Each
 * entry pairs a display name with the function implementing it.  Entries
 * are braced individually (the original used one flat initializer list)
 * so that a missing or extra field becomes a compile-time diagnostic
 * instead of silently shifting every following entry.
 */
OneWayFunctionInfor funcInfor[FUNCTION_NUM] = {
    { "SHA3-256",           crypto_sha3_256     },
    { "SHA1",               crypto_sha1         },
    { "SHA256",             crypto_sha256       },
    { "SHA512",             crypto_sha512       },
    { "Whirlpool",          crypto_whirlpool    },
    { "RIPEMD-160",         crypto_ripemd160    },
    { "BLAKE2s(256bits)",   crypto_blake2s256   },
    { "AES(128bits)",       crypto_aes128       },
    { "DES",                crypto_des          },
    { "RC4",                crypto_rc4          },
    { "Camellia(128bits)",  crypto_camellia128  },
    { "CRC32",              crypto_crc32        },
    { "HMAC(MD5)",          crypto_hmac_md5     },
    { "GOST R 34.11-94",    crypto_gost         },
    { "HAVAL-256/5",        crypto_haval5_256   },
    { "Skein-512(256bits)", crypto_skein512_256 }
};

/*
 * One-time setup for the primitives that depend on precomputed tables
 * (GOST S-box expansion and the CRC32 lookup table).  Must be called
 * before any entry in funcInfor is invoked.
 */
void initOneWayFunction()
{
	gost_init_table();
	CRC32_Table_Init();
}

/*
void testOneWayFunction(const char *mess, const int64_t iterNum)
{
	int64_t j;
	uint32_t messLen = (uint32_t)strlen(mess);
	uint8_t input[INPUT_LEN], output[FUNCTION_NUM][OUTPUT_LEN];

	memset(input, 0, INPUT_LEN*sizeof(uint8_t));
	memcpy(input, mess, messLen*sizeof(char));

	printf("**************************** Correctness test (One way function) ****************************\n");
	printf("Test message: %s\n", mess);
	for (int i = 0; i < FUNCTION_NUM; ++i) {
		printf("%02d ", i);
		funcInfor[i].func(input, messLen, output[i]);
		view_data_u8(funcInfor[i].funcName, output[i], OUTPUT_LEN);
	}
	printf("*********************************************************************************************\n");

	printf("************************************************* Performance test (One way function) *************************************************\n");
	uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
	assert(NULL != result);
	memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));

	uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
	uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
	printf(" %-18s", "Algorithm");
	for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
		printf("%12d", threadNumArr[ix]);
	printf("\n");

	for (int i = 0; i < FUNCTION_NUM; ++i) {
		printf("%02d %-18s\t", i, funcInfor[i].funcName);
		for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
			omp_set_num_threads(threadNumArr[ix]);
			double startTime = get_wall_time();
			if (threadNumArr[ix] == 1) {
				for (j = 0; j < iterNum; ++j) {
					funcInfor[i].func(input, messLen, result + j * OUTPUT_LEN);
				}
			} else {
#pragma omp parallel for firstprivate(input), private(j) shared(result)
				for (j = 0; j < iterNum; ++j) {
					funcInfor[i].func(input, messLen, result + j * OUTPUT_LEN);
				}
			}
			double endTime = get_wall_time();
			double costTime = endTime - startTime;
			printf("%5.0f Kps ", iterNum / 1000 / costTime);
			fflush(stdout);

			// Check result
			for (j = 0; j < iterNum; j += 1) {
				if (memcmp(output[i], result + j * OUTPUT_LEN, OUTPUT_LEN)) {
					printf("Thread num: %u, j: %ld\n", threadNumArr[ix], j);
					view_data_u8("output", output[i], OUTPUT_LEN);
					view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
					abort();
				}
			}
		}
		printf("\n");
	}

	if (NULL != result) {
		free(result);
		result = NULL;
	}
	printf("***************************************************************************************************************************************\n");
}
*/
GB_binop__isgt_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_uint8 // A.*B function (eWiseMult): GB_AemultB__isgt_uint8 // A*D function (colscale): GB_AxD__isgt_uint8 // D*A function (rowscale): GB_DxB__isgt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__isgt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__isgt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_uint8 // C=scalar+B GB_bind1st__isgt_uint8 // C=scalar+B' GB_bind1st_tran__isgt_uint8 // C=A+scalar GB_bind2nd__isgt_uint8 // C=A'+scalar GB_bind2nd_tran__isgt_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define 
// NOTE(review): this is an auto-generated GraphBLAS kernel file (Generated/
// folder, per the header); do not hand-edit the logic — regenerate instead.
// All kernel bodies are pulled in via #include'd templates, which expand
// using the GB_* macros defined below.

// (continuation of the GB_COPY_A_TO_C macro; its "#define" line lies above
// this chunk)  cij = Ax [pA]
GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: z = ISGT (x,y) for uint8_t, i.e. (x > y)
#define GB_BINOP(z, x, y) \
    z = (x > y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" means no CBLAS gateway exists for ISGT/uint8
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

    // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
    // ISGT is not in that list, so this accumulate kernel is compiled out.
    void (none)
    (
        GrB_Matrix C,
        const GrB_Matrix A,
        const GrB_Matrix B,
        const int nthreads
    )
    {
        #include "GB_dense_ewise3_accum_template.c"
    }

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isgt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isgt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isgt_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isgt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isgt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__isgt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isgt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isgt_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t bij = Bx [p] ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isgt_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = (x > aij) ; \
}

GrB_Info GB_bind1st_tran__isgt_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (generator emits a redundant re-definition here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = (aij > y) ; \
}

GrB_Info GB_bind2nd_tran__isgt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matrix.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "matrix.h"
#include "util.h"
#include "timer.h"

#include <math.h>

/* Map the generic LAPACK_* names onto single- or double-precision LAPACK
 * routines depending on the configured width of val_t. */
#if SPLATT_VAL_TYPEWIDTH == 32
  void spotrf_(char *, int *, float *, int *, int *);
  void spotrs_(char *, int *, int *, float *, int *, float *, int *, int *);
  void ssyrk_(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *);

  #define LAPACK_DPOTRF spotrf_
  #define LAPACK_DPOTRS spotrs_
  #define LAPACK_DSYRK  ssyrk_
#else
  void dpotrf_(char *, int *, double *, int *, int *);
  void dpotrs_(char *, int *, int *, double *, int *, double *, int *, int *);
  void dsyrk_(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *);

  #define LAPACK_DPOTRF dpotrf_
  #define LAPACK_DPOTRS dpotrs_
  #define LAPACK_DSYRK  dsyrk_
#endif


/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

/**
* @brief Normalize each column of row-major A by its 2-norm, storing the norms
*        in lambda. Thread-parallel; under MPI the norms are reduced globally.
*        NOTE(review): no guard against a zero column norm here (unlike
*        p_mat_maxnorm, which clamps to 1) — a zero column divides by zero.
*
* @param A      The matrix to normalize (I x J, row-major); modified in place.
* @param lambda Output array of J column norms.
* @param rinfo  MPI rank info (used only when SPLATT_USE_MPI is defined).
* @param thds   Per-thread scratch buffers; scratch[0] holds partial sums.
*/
static void p_mat_2norm(
  matrix_t * const A,
  val_t * const restrict lambda,
  rank_info * const rinfo,
  thd_info * const thds)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const mylambda = (val_t *) thds[tid].scratch[0];
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }

    /* accumulate per-thread partial sums of squares over the rows */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        mylambda[j] += vals[j + (i*J)] * vals[j + (i*J)];
      }
    }

    /* do reduction on partial sums */
    thd_reduce(thds, 0, J, REDUCE_SUM);

    #pragma omp master
    {
#ifdef SPLATT_USE_MPI
      /* now do an MPI reduction to get the global lambda */
      timer_start(&timers[TIMER_MPI_NORM]);
      timer_start(&timers[TIMER_MPI_IDLE]);
      MPI_Barrier(rinfo->comm_3d);
      timer_stop(&timers[TIMER_MPI_IDLE]);

      timer_start(&timers[TIMER_MPI_COMM]);
      MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
      timer_stop(&timers[TIMER_MPI_COMM]);
      timer_stop(&timers[TIMER_MPI_NORM]);
#else
      memcpy(lambda, mylambda, J * sizeof(val_t));
#endif
    }
    /* master has no implicit barrier; wait for lambda to be published */
    #pragma omp barrier

    #pragma omp for schedule(static)
    for(idx_t j=0; j < J; ++j) {
      lambda[j] = sqrt(lambda[j]);
    }

    /* do the normalization */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        vals[j+(i*J)] /= lambda[j];
      }
    }
  } /* end omp parallel */
}


/**
* @brief Normalize each column of row-major A by its max entry (clamped below
*        at 1), storing the scales in lambda. Structure mirrors p_mat_2norm.
*
* @param A      The matrix to normalize (I x J, row-major); modified in place.
* @param lambda Output array of J column scales.
* @param rinfo  MPI rank info (used only when SPLATT_USE_MPI is defined).
* @param thds   Per-thread scratch buffers; scratch[0] holds partial maxes.
*/
static void p_mat_maxnorm(
  matrix_t * const A,
  val_t * const restrict lambda,
  rank_info * const rinfo,
  thd_info * const thds)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const mylambda = (val_t *) thds[tid].scratch[0];
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }

    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        mylambda[j] = SS_MAX(mylambda[j], vals[j+(i*J)]);
      }
    }

    /* do reduction on partial maxes */
    thd_reduce(thds, 0, J, REDUCE_MAX);

    #pragma omp master
    {
#ifdef SPLATT_USE_MPI
      /* now do an MPI reduction to get the global lambda */
      timer_start(&timers[TIMER_MPI_NORM]);
      timer_start(&timers[TIMER_MPI_IDLE]);
      MPI_Barrier(rinfo->comm_3d);
      timer_stop(&timers[TIMER_MPI_IDLE]);

      timer_start(&timers[TIMER_MPI_COMM]);
      MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_MAX, rinfo->comm_3d);
      timer_stop(&timers[TIMER_MPI_COMM]);
      timer_stop(&timers[TIMER_MPI_NORM]);
#else
      memcpy(lambda, mylambda, J * sizeof(val_t));
#endif
    }
    #pragma omp barrier

    /* clamp scales at 1 so small columns are not inflated (and avoid /0) */
    #pragma omp for schedule(static)
    for(idx_t j=0; j < J; ++j) {
      lambda[j] = SS_MAX(lambda[j], 1.);
    }

    /* do the normalization */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        vals[j+(i*J)] /= lambda[j];
      }
    }
  } /* end omp parallel */
}


/**
* @brief Solve the system LX = B.
*
* @param L The lower triangular matrix of coefficients.
* @param B The right-hand side which is overwritten with X.
*          NOTE(review): B is assumed square (N x N), matching L — TODO
*          confirm; only N = L->I is used for both dimensions.
*/
static void p_mat_forwardsolve(
  matrix_t const * const L,
  matrix_t * const B)
{
  /* check dimensions */
  idx_t const N = L->I;

  val_t const * const restrict lv = L->vals;
  val_t * const restrict bv = B->vals;

  /* first row of X is easy */
  for(idx_t j=0; j < N; ++j) {
    bv[j] /= lv[0];
  }

  /* now do forward substitution */
  for(idx_t i=1; i < N; ++i) {
    /* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} L(i,j)X(i,j) */
    for(idx_t j=0; j < i; ++j) {
      for(idx_t f=0; f < N; ++f) {
        bv[f+(i*N)] -= lv[j+(i*N)] * bv[f+(j*N)];
      }
    }
    for(idx_t f=0; f < N; ++f) {
      bv[f+(i*N)] /= lv[i+(i*N)];
    }
  }
}


/**
* @brief Solve the system UX = B.
*
* @param U The upper triangular matrix of coefficients.
* @param B The right-hand side which is overwritten with X.
*/
static void p_mat_backwardsolve(
  matrix_t const * const U,
  matrix_t * const B)
{
  /* check dimensions */
  idx_t const N = U->I;

  val_t const * const restrict rv = U->vals;
  val_t * const restrict bv = B->vals;

  /* last row of X is easy */
  for(idx_t f=0; f < N; ++f) {
    idx_t const i = N-1;
    bv[f+(i*N)] /= rv[i+(i*N)];
  }

  /* now do backward substitution */
  for(idx_t row=2; row <= N; ++row) {
    /* operate with (N - row) to make unsigned comparisons easy */
    idx_t const i = N - row;

    /* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} R(i,j)X(i,j) */
    for(idx_t j=i+1; j < N; ++j) {
      for(idx_t f=0; f < N; ++f) {
        bv[f+(i*N)] -= rv[j+(i*N)] * bv[f+(j*N)];
      }
    }
    for(idx_t f=0; f < N; ++f) {
      bv[f+(i*N)] /= rv[i+(i*N)];
    }
  }
}


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/**
* @brief Invert a symmetric positive-definite matrix in place via Cholesky
*        factorization and two triangular solves: A <- A^{-1}.
*
* @param A Square SPD matrix; overwritten with its inverse.
*/
void mat_syminv(
  matrix_t * const A)
{
  /* check dimensions */
  assert(A->I == A->J);

  idx_t const N = A->I;

  matrix_t * L = mat_alloc(N, N);

  /* do a Cholesky factorization on A */
  mat_cholesky(A, L);

  /* setup identity matrix */
  memset(A->vals, 0, N*N*sizeof(val_t));
  for(idx_t n=0; n < N; ++n) {
    A->vals[n+(n*N)] = 1.;
  }

  /* Solve L*Y = I */
  p_mat_forwardsolve(L, A);

  /* transpose L in place (L becomes U = L^T) */
  for(idx_t i=0; i < N; ++i) {
    for(idx_t j=i+1; j < N; ++j) {
      L->vals[j+(i*N)] = L->vals[i+(j*N)];
      L->vals[i+(j*N)] = 0.;
    }
  }

  /* Solve U*A = Y */
  p_mat_backwardsolve(L, A);

  mat_free(L);
}


/**
* @brief Compute the Cholesky factorization A = L * L^T (lower triangular L).
*
* @param A Square input matrix (assumed SPD; no failure check here).
* @param L Output lower-triangular factor, same dimensions as A.
*/
void mat_cholesky(
  matrix_t const * const A,
  matrix_t * const L)
{
  /* check dimensions */
  assert(A->I == A->J);
  assert(A->I == L->J);
  assert(L->I == L->J);

  idx_t const N = A->I;
  val_t const * const restrict av = A->vals;
  val_t * const restrict lv = L->vals;

  memset(lv, 0, N*N*sizeof(val_t));
  for (idx_t i = 0; i < N; ++i) {
    for (idx_t j = 0; j <= i; ++j) {
      val_t inner = 0;
      for (idx_t k = 0; k < j; ++k) {
        inner += lv[k+(i*N)] * lv[k+(j*N)];
      }

      if(i == j) {
        lv[j+(i*N)] = sqrt(av[i+(i*N)] - inner);
      } else {
        lv[j+(i*N)] = 1.0 / lv[j+(j*N)] * (av[j+(i*N)] - inner);
      }
    }
  }
}


/**
* @brief Compute the Hadamard product of nmults Gram matrices (A^T A) over a
*        cyclic window of the factor matrices, writing the F x F result into
*        ret (both triangles filled).
*
* @param mats   The factor matrices (all with F columns).
* @param start  First mode in the cyclic window.
* @param nmults Number of modes to include.
* @param nmats  Total number of factor matrices (for the modulus).
* @param buf    F x F workspace for each Gram matrix.
* @param ret    F x F output, overwritten.
*/
void mat_aTa_hada(
  matrix_t ** mats,
  idx_t const start,
  idx_t const nmults,
  idx_t const nmats,
  matrix_t * const buf,
  matrix_t * const ret)
{
  idx_t const F = mats[0]->J;

  /* check matrix dimensions */
  assert(ret->I == ret->J);
  assert(ret->I == F);
  assert(buf->I == F);
  assert(buf->J == F);
  assert(ret->vals != NULL);
  assert(mats[0]->rowmajor);
  assert(ret->rowmajor);

  val_t * const restrict rv   = ret->vals;
  val_t * const restrict bufv = buf->vals;

  /* initialize the upper triangle of the running product to 1 */
  for(idx_t i=0; i < F; ++i) {
    for(idx_t j=i; j < F; ++j) {
      rv[j+(i*F)] = 1.;
    }
  }

  for(idx_t mode=0; mode < nmults; ++mode) {
    idx_t const m = (start+mode) % nmats;
    idx_t const I = mats[m]->I;
    val_t const * const Av = mats[m]->vals;
    memset(bufv, 0, F * F * sizeof(val_t));

    /* compute upper triangular matrix */
    for(idx_t i=0; i < I; ++i) {
      for(idx_t mi=0; mi < F; ++mi) {
        for(idx_t mj=mi; mj < F; ++mj) {
          bufv[mj + (mi*F)] += Av[mi + (i*F)] * Av[mj + (i*F)];
        }
      }
    }

    /* hadamard product */
    for(idx_t mi=0; mi < F; ++mi) {
      for(idx_t mj=mi; mj < F; ++mj) {
        rv[mj + (mi*F)] *= bufv[mj + (mi*F)];
      }
    }
  }

  /* copy to lower triangular matrix */
  for(idx_t i=1; i < F; ++i) {
    for(idx_t j=0; j < i; ++j) {
      rv[j + (i*F)] = rv[i + (j*F)];
    }
  }
}


/**
* @brief Compute the Gram matrix ret = A^T * A via a BLAS SYRK call (only the
*        'L' triangle as seen by the Fortran routine is guaranteed filled).
*        Under MPI, the local Gram matrices are summed across comm_3d.
*
* @param A        The I x F row-major input matrix.
* @param ret      The F x F output (row-major).
* @param rinfo    MPI rank info (used only when SPLATT_USE_MPI is defined).
* @param thds     Unused in this implementation (kept for interface parity).
* @param nthreads Unused in this implementation (kept for interface parity).
*/
void mat_aTa(
  matrix_t const * const A,
  matrix_t * const ret,
  rank_info * const rinfo,
  thd_info * const thds,
  idx_t const nthreads)
{
  timer_start(&timers[TIMER_ATA]);
  /* check matrix dimensions */
  assert(ret->I == ret->J);
  assert(ret->I == A->J);
  assert(ret->vals != NULL);
  assert(A->rowmajor);
  assert(ret->rowmajor);

  idx_t const I = A->I;
  idx_t const F = A->J;
  val_t const * const restrict Av = A->vals;

  char uplo = 'L';
  char trans = 'N';   /* actually do A * A' due to row-major ordering */
  int N = (int) F;
  int K = (int) I;
  int lda = N;
  int ldc = N;
  val_t alpha = 1.;
  val_t beta = 0.;
  LAPACK_DSYRK(&uplo, &trans, &N, &K, &alpha, A->vals, &lda, &beta, ret->vals, &ldc);

#ifdef SPLATT_USE_MPI
  timer_start(&timers[TIMER_MPI_ATA]);
  timer_start(&timers[TIMER_MPI_IDLE]);
  MPI_Barrier(rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_IDLE]);

  timer_start(&timers[TIMER_MPI_COMM]);
  MPI_Allreduce(MPI_IN_PLACE, ret->vals, F * F, SPLATT_MPI_VAL, MPI_SUM,
      rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_COMM]);
  timer_stop(&timers[TIMER_MPI_ATA]);
#endif

  timer_stop(&timers[TIMER_ATA]);
}


/**
* @brief Tiled dense matrix multiply: C += A * B (note the ACCUMULATION —
*        the caller must zero C beforehand to get a pure product).
*
* @param A The left (M x Na) row-major operand.
* @param B The right (Na x N) row-major operand.
* @param C The (M x N) output; accumulated into, not overwritten.
*/
void mat_matmul(
  matrix_t const * const A,
  matrix_t const * const B,
  matrix_t  * const C)
{
  timer_start(&timers[TIMER_MATMUL]);
  /* check dimensions */
  assert(A->J == B->I);
  assert(C->I == A->I);
  assert(C->J == B->J);

  val_t const * const restrict av = A->vals;
  val_t const * const restrict bv = B->vals;
  val_t       * const restrict cv = C->vals;

  idx_t const M  = A->I;
  idx_t const N  = B->J;
  idx_t const Na = A->J;

  /* tiled matrix multiplication */
  idx_t const TILE = 16;
  #pragma omp parallel for schedule(static)
  for(idx_t i=0; i < M; ++i) {
    for(idx_t jt=0; jt < N; jt += TILE) {
      for(idx_t kt=0; kt < Na; kt += TILE) {
        idx_t const JSTOP = SS_MIN(jt+TILE, N);
        for(idx_t j=jt; j < JSTOP; ++j) {
          val_t accum = 0;
          idx_t const KSTOP = SS_MIN(kt+TILE, Na);
          for(idx_t k=kt; k < KSTOP; ++k) {
            accum += av[k + (i*Na)] * bv[j + (k*N)];
          }
          cv[j + (i*N)] += accum;
        }
      }
    }
  }

  timer_stop(&timers[TIMER_MATMUL]);
}


/**
* @brief Normalize the columns of A with the requested norm, dispatching to
*        the private 2-norm / max-norm workers.
*
* @param A        The matrix to normalize; modified in place.
* @param lambda   Output array of column scales.
* @param which    MAT_NORM_2 or MAT_NORM_MAX (anything else aborts).
* @param rinfo    MPI rank info, forwarded to the workers.
* @param thds     Per-thread scratch, forwarded to the workers.
* @param nthreads Number of OpenMP threads to use.
*/
void mat_normalize(
  matrix_t * const A,
  val_t * const restrict lambda,
  splatt_mat_norm const which,
  rank_info * const rinfo,
  thd_info * const thds,
  idx_t const nthreads)
{
  timer_start(&timers[TIMER_MATNORM]);

  splatt_omp_set_num_threads(nthreads);

  switch(which) {
  case MAT_NORM_2:
    p_mat_2norm(A, lambda, rinfo, thds);
    break;
  case MAT_NORM_MAX:
    p_mat_maxnorm(A, lambda, rinfo, thds);
    break;
  default:
    fprintf(stderr, "SPLATT: mat_normalize supports 2 and MAX only.\n");
    abort();
  }
  timer_stop(&timers[TIMER_MATNORM]);
}


/**
* @brief Form the regularized normal equations for one mode (the Hadamard
*        product of all other modes' Gram matrices, scaled entries getting
*        +reg on the diagonal) and solve them against rhs via Cholesky
*        (DPOTRF + DPOTRS). Only the 'L'-as-seen-by-LAPACK triangle of the
*        normal equations is formed. LAPACK errors are reported to stderr
*        but not otherwise handled.
*
* @param mode   The mode being solved for (its Gram matrix is skipped).
* @param nmodes Number of modes.
* @param aTa    Gram matrices; aTa[MAX_NMODES] is workspace, overwritten.
* @param rhs    Right-hand side (nrhs = rhs->I); overwritten with solution.
* @param reg    Ridge regularization added to the diagonal.
*/
void mat_solve_normals(
  idx_t const mode,
  idx_t const nmodes,
  matrix_t * * aTa,
  matrix_t * rhs,
  val_t const reg)
{
  timer_start(&timers[TIMER_INV]);

  /* nfactors */
  int const N = aTa[0]->J;

  /* form upper-triangular normal equations */
  val_t * const restrict neqs = aTa[MAX_NMODES]->vals;
  #pragma omp parallel
  {
    /* first initialize */
    #pragma omp for schedule(static, 1)
    for(int i=0; i < N; ++i) {
      neqs[i+(i*N)] = 1. + reg;
      for(int j=i+1; j < N; ++j) {
        neqs[j+(i*N)] = 1.;
      }
    }

    for(idx_t m=0; m < nmodes; ++m) {
      if(m == mode) {
        continue;
      }
      val_t const * const restrict mat = aTa[m]->vals;
      #pragma omp for schedule(static, 1) nowait
      for(int i=0; i < N; ++i) {
        for(int j=i; j < N; ++j) {
          neqs[j+(i*N)] *= mat[j+(i*N)];
        }
      }
    }
  } /* omp parallel */

  /* Cholesky factorization */
  char uplo = 'L';
  int order = N;
  int lda = N;
  int info;
  LAPACK_DPOTRF(&uplo, &order, neqs, &lda, &info);
  if(info) {
    fprintf(stderr, "SPLATT: DPOTRF returned %d\n", info);
  }

  /* Solve against rhs */
  int nrhs = (int) rhs->I;
  int ldb = N;
  LAPACK_DPOTRS(&uplo, &order, &nrhs, neqs, &lda, rhs->vals, &ldb, &info);
  if(info) {
    fprintf(stderr, "SPLATT: DPOTRS returned %d\n", info);
  }

  timer_stop(&timers[TIMER_INV]);
}


/**
* @brief Compute the pseudo-inverse workspace for one mode:
*        aTa[MAX_NMODES] <- inv( Hadamard of the other modes' Gram matrices ).
*
* @param mode   The mode being solved for (its Gram matrix is skipped).
* @param nmodes Number of modes.
* @param aTa    Gram matrices; aTa[MAX_NMODES] is workspace, overwritten.
*/
void calc_gram_inv(
  idx_t const mode,
  idx_t const nmodes,
  matrix_t ** aTa)
{
  timer_start(&timers[TIMER_INV]);

  idx_t const rank = aTa[0]->J;
  val_t * const restrict av = aTa[MAX_NMODES]->vals;

  /* ata[MAX_NMODES] = hada(aTa[0], aTa[1], ...) */
  for(idx_t x=0; x < rank*rank; ++x) {
    av[x] = 1.;
  }
  for(idx_t m=1; m < nmodes; ++m) {
    idx_t const madjust = (mode + m) % nmodes;
    val_t const * const vals = aTa[madjust]->vals;
    for(idx_t x=0; x < rank*rank; ++x) {
      av[x] *= vals[x];
    }
  }

  /* M2 = M2^-1 */
  mat_syminv(aTa[MAX_NMODES]);
  timer_stop(&timers[TIMER_INV]);
}


/**
* @brief Allocate an uninitialized nrows x ncols row-major matrix.
*/
matrix_t * mat_alloc(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * mat = (matrix_t *) splatt_malloc(sizeof(matrix_t));
  mat->I = nrows;
  mat->J = ncols;
  mat->vals = (val_t *) splatt_malloc(nrows * ncols * sizeof(val_t));
  mat->rowmajor = 1;
  return mat;
}


/**
* @brief Allocate an nrows x ncols matrix filled with random values
*        (via fill_rand).
*/
matrix_t * mat_rand(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * mat = mat_alloc(nrows, ncols);
  val_t * const vals = mat->vals;

  fill_rand(vals, nrows * ncols);

  return mat;
}


/**
* @brief Free a matrix and its value buffer.
*/
void mat_free(
  matrix_t * mat)
{
  free(mat->vals);
  free(mat);
}


/**
* @brief Return a new ROW-major copy of a column-major matrix.
*/
matrix_t * mat_mkrow(
  matrix_t const * const mat)
{
  assert(mat->rowmajor == 0);

  idx_t const I = mat->I;
  idx_t const J = mat->J;

  matrix_t * row = mat_alloc(I, J);
  val_t       * const restrict rowv = row->vals;
  val_t const * const restrict colv = mat->vals;

  for(idx_t i=0; i < I; ++i) {
    for(idx_t j=0; j < J; ++j) {
      rowv[j + (i*J)] = colv[i + (j*I)];
    }
  }

  return row;
}


/**
* @brief Return a new COLUMN-major copy of a row-major matrix.
*/
matrix_t * mat_mkcol(
  matrix_t const * const mat)
{
  assert(mat->rowmajor == 1);
  idx_t const I = mat->I;
  idx_t const J = mat->J;

  matrix_t * col = mat_alloc(I, J);
  val_t       * const restrict colv = col->vals;
  val_t const * const restrict rowv = mat->vals;

  for(idx_t i=0; i < I; ++i) {
    for(idx_t j=0; j < J; ++j) {
      colv[i + (j*I)] = rowv[j + (i*J)];
    }
  }

  col->rowmajor = 0;

  return col;
}


/**
* @brief Allocate a CSR sparse matrix with room for nnz nonzeros.
*/
spmatrix_t * spmat_alloc(
  idx_t const nrows,
  idx_t const ncols,
  idx_t const nnz)
{
  spmatrix_t * mat = (spmatrix_t*) splatt_malloc(sizeof(spmatrix_t));
  mat->I = nrows;
  mat->J = ncols;
  mat->nnz = nnz;
  mat->rowptr = (idx_t*) splatt_malloc((nrows+1) * sizeof(idx_t));
  mat->colind = (idx_t*) splatt_malloc(nnz * sizeof(idx_t));
  mat->vals   = (val_t*) splatt_malloc(nnz * sizeof(val_t));
  return mat;
}


/**
* @brief Free a CSR sparse matrix and all of its buffers.
*/
void spmat_free(
  spmatrix_t * mat)
{
  free(mat->rowptr);
  free(mat->colind);
  free(mat->vals);
  free(mat);
}
metadirective.c
/* OpenMP 5.x "metadirective" conformance tests: each check offloads a region
 * and verifies that the selected directive variant (parallel vs. single)
 * produced the expected thread count.
 *
 * NOTE(review): GPU_THREAD_COUNT == 256 assumes the offload runtime launches
 * exactly 256 threads for a bare "parallel" in a target region — confirm for
 * the tested device/runtime. The arch/isa/vendor checks likewise assume an
 * AMD (amdgcn) GPU target with an x86_64 host. */
#include <stdio.h>
#include <omp.h>

#define N 10
#define GPU_THREAD_COUNT 256

/* when(device={kind(gpu)}): expect "parallel" (GPU thread count) on the GPU */
int check_device_kind_gpu_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(gpu)}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: device_kind_gpu_selector\n");
    return 0;
  }
  return 1;
}

/* when(device={kind(cpu, host)}): must NOT match on the GPU -> single */
int check_device_kind_cpu_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(cpu, host)}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 1) {
    printf("Failed metadirective: device_kind_cpu_selector\n");
    return 0;
  }
  return 1;
}

/* when(device={arch("amdgcn")}): expect a match on the (assumed AMD) GPU */
int check_device_arch_amdgcn_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {arch("amdgcn")}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: device_arch_amdgcn_selector\n");
    return 0;
  }
  return 1;
}

/* when(device={arch("x86_64")}): must NOT match on the GPU -> single */
int check_device_arch_x86_64_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {arch("x86_64")}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 1) {
    printf("Failed metadirective: device_arch_x86_64_selector\n");
    return 0;
  }
  return 1;
}

/* when(device={isa("flat-address-space")}): expect a match on the GPU */
int check_device_isa_feature_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {isa("flat-address-space")}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: device_isa_feature_selector\n");
    return 0;
  }
  return 1;
}

/* when(implementation={vendor(amd)}): expect a match (assumes AMD toolchain) */
int check_implementation_vendor_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(implementation = {vendor(amd)}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: implementation_vendor_selector\n");
    return 0;
  }
  return 1;
}

/* Two matching when() clauses: the higher score(100) variant must win -> 8 */
int check_scoring() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(implementation = {vendor(score(20): amd)}: parallel num_threads(4))\
    when(implementation = {vendor(score(100): amd)}: parallel num_threads(8))\
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 8) {
    printf("Failed metadirective: scoring\n");
    return 0;
  }
  return 1;
}

/* extension(match_any): one of {kind(cpu), arch("amdgcn")} suffices -> match */
int check_extension_match_any() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(cpu), arch("amdgcn")}, \
         implementation = {extension(match_any)} \
         : parallel)\
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: check_extension_match_any\n");
    return 0;
  }
  return 1;
}

/* extension(match_all): kind(cpu) fails on the GPU -> no match -> single */
int check_extension_match_all() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(cpu), arch("amdgcn")}, \
         implementation = {extension(match_all)} \
         : parallel)\
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 1) {
    printf("Failed metadirective: check_extension_match_all\n");
    return 0;
  }
  return 1;
}

/* user={condition(N > 5)}: compile-time-true condition -> num_threads(4) */
int check_static_condition_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(user = {condition(N > 5)}: parallel num_threads(4)) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 4) {
    printf("Failed metadirective: static_condition_selector\n");
    return 0;
  }
  return 1;
}

/* Run every check; exit non-zero on the first batch of failures. */
int main(void) {
  if (!check_device_kind_gpu_selector() ||
      !check_device_kind_cpu_selector() ||
      !check_device_arch_amdgcn_selector() ||
      !check_device_arch_x86_64_selector() ||
      !check_device_isa_feature_selector() ||
      !check_implementation_vendor_selector() ||
      !check_scoring() ||
      !check_extension_match_any() ||
      !check_extension_match_all() ||
      !check_static_condition_selector()) {
    return -1;
  }

  printf("Success\n");
  return 0;
}
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 14000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 100 #endif #endif #ifndef NTIMES # define NTIMES 100 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
 * *-----------------------------------------------------------------------*/

/* Horizontal separator used by all report output. */
# define HLINE "-------------------------------------------------------------\n"

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

/* Element type of the benchmark arrays; override with -DSTREAM_TYPE=float. */
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

/* The three benchmark arrays. OFFSET allows experiments with relative
   alignment of the arrays (default 0). */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
    b[STREAM_ARRAY_SIZE+OFFSET],
    c[STREAM_ARRAY_SIZE+OFFSET];

/* Per-kernel timing statistics accumulated over the NTIMES repetitions
   (index order: 0=Copy, 1=Scale, 2=Add, 3=Triad). */
static double avgtime[4] = {0}, maxtime[4] = {0},
    mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* Bytes moved per kernel invocation: Copy/Scale touch 2 arrays,
   Add/Triad touch 3. Used to convert min time into MB/s. */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
    };

extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

/* Driver: prints configuration, calibrates the timer, runs the four
 * kernels NTIMES, reports best-rate bandwidth and validates results. */
int main()
    {
    int quantum, checktick();   /* checktick(): timer granularity in usec */
    int BytesPerWord;
    int k;
    ssize_t j;
    STREAM_TYPE scalar;
    double t, times[4][NTIMES]; /* raw per-iteration kernel timings */

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);

    printf(HLINE);
#ifdef N
    /* Legacy STREAM used -DN for the array size; warn if it is still set. */
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
    /* Report the team size the runtime grants inside a parallel region. */
#pragma omp parallel
    {
#pragma omp master
    {
        k = omp_get_num_threads();
        printf ("Number of Threads requested = %i\n",k);
    }
    }
#endif

#ifdef _OPENMP
    /* Cross-check: count threads by having each one atomically bump k. */
    k = 0;
#pragma omp parallel
#pragma omp atomic
    k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
    /* Parallel first-touch initialization so pages are distributed the
       same way the kernels will access them. */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        a[j] = 1.0;
        b[j] = 2.0;
        c[j] = 0.0;
    }

    printf(HLINE);

    if ( (quantum = checktick()) >= 1)
        printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum);
    else {
        printf("Your clock granularity appears to be " "less than one microsecond.\n");
        quantum = 1;  /* avoid divide-by-zero in the ticks estimate below */
    }

    /* Time one pass over a[] to estimate how long each kernel will take. */
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
        a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order" " of %d microseconds.\n", (int) t );
    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
        {
        /* Copy: c = a */
        times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j];
#endif
        times[0][k] = mysecond() - times[0][k];

        /* Scale: b = scalar * c */
        times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            b[j] = scalar*c[j];
#endif
        times[1][k] = mysecond() - times[1][k];

        /* Add: c = a + b */
        times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j]+b[j];
#endif
        times[2][k] = mysecond() - times[2][k];

        /* Triad: a = b + scalar * c */
        times[3][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            a[j] = b[j]+scalar*c[j];
#endif
        times[3][k] = mysecond() - times[3][k];
        }

    /* --- SUMMARY --- */

    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
        {
        for (j=0; j<4; j++)
            {
            avgtime[j] = avgtime[j] + times[j][k];
            mintime[j] = MIN(mintime[j], times[j][k]);
            maxtime[j] = MAX(maxtime[j], times[j][k]);
            }
        }

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
        avgtime[j] = avgtime[j]/(double)(NTIMES-1);

        /* Best rate uses the MINIMUM time over the kept iterations. */
        printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
            1.0E-06 * bytes[j]/mintime[j],
            avgtime[j],
            mintime[j],
            maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}

/* Number of timer samples collected by checktick(). */
# define M 20

/* Estimate the system clock granularity (in microseconds) by sampling
 * M distinct timer values and returning the minimum positive delta. */
int checktick()
    {
    int i, minDelta, Delta;
    double t1, t2, timesfound[M];

/* Collect a sequence of M unique time values from the system. */

    for (i = 0; i < M; i++) {
        t1 = mysecond();
        /* Busy-wait until the timer advances by at least 1 usec. */
        while( ((t2=mysecond()) - t1) < 1.0E-6 )
            ;
        timesfound[i] = t1 = t2;
        }

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

    minDelta = 1000000;
    for (i = 1; i < M; i++) {
        Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
        minDelta = MIN(minDelta, MAX(Delta,0));
        }

    return(minDelta);
    }

/* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems.
 */
#include <time.h>
#include <sys/time.h>

#ifdef __hermit__
/* HermitCore unikernel: no gettimeofday; derive wall time from the TSC. */
extern unsigned int get_cpufreq();

/* TSC value captured at program start; mysecond() reports time since then. */
static unsigned long long start_tsc;

/* Read the time-stamp counter (x86 RDTSC). */
inline static unsigned long long rdtsc(void)
{
    unsigned long lo, hi;
    asm volatile ("rdtsc" : "=a"(lo), "=d"(hi) :: "memory");
    return ((unsigned long long) hi << 32ULL | (unsigned long long) lo);
}

/* Runs before main() to record the TSC baseline. */
__attribute__((constructor)) static void timer_init()
{
    start_tsc = rdtsc();
}
#endif

/* Wall-clock time in seconds. POSIX build uses gettimeofday; the
 * HermitCore build converts elapsed TSC ticks using the CPU frequency
 * (get_cpufreq() is presumably in MHz, given the 1e6 factor — TODO confirm). */
double mysecond()
{
#ifndef __hermit__
    struct timeval tp;
    struct timezone tzp;
    int i;

    i = gettimeofday(&tp,&tzp);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
#else
    double ret;

    ret = ((double) (rdtsc() - start_tsc)) / ((double) get_cpufreq() * 1000000.0);
    //printf("CPU frequency: %d MHz\n", get_cpufreq());

    return ret;
#endif
}

#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/* Validate the arrays after the timed loop: replay the kernel sequence on
 * scalar values (every element should hold the same value), compare the
 * average absolute error per array against a precision-dependent epsilon,
 * and report per-array error counts on failure. */
void checkSTREAMresults ()
{
    STREAM_TYPE aj,bj,cj,scalar;        /* expected scalar value of each array */
    STREAM_TYPE aSumErr,bSumErr,cSumErr;
    STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
    double epsilon;
    ssize_t j;
    int k,ierr,err;

    /* reproduce initialization */
    aj = 1.0;
    bj = 2.0;
    cj = 0.0;
    /* a[] is modified during timing check */
    aj = 2.0E0 * aj;
    /* now execute timing loop */
    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
        {
            cj = aj;
            bj = scalar*cj;
            cj = aj+bj;
            aj = bj+scalar*cj;
        }

    /* accumulate deltas between observed and expected results */
    aSumErr = 0.0;
    bSumErr = 0.0;
    cSumErr = 0.0;
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        aSumErr += abs(a[j] - aj);
        bSumErr += abs(b[j] - bj);
        cSumErr += abs(c[j] - cj);
        // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
    }
    aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

    /* Tolerance scales with the floating-point precision in use. */
    if (sizeof(STREAM_TYPE) == 4) {
        epsilon = 1.e-6;
    }
    else if (sizeof(STREAM_TYPE) == 8) {
        epsilon = 1.e-13;
    }
    else {
        printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
        epsilon = 1.e-6;
    }

    err = 0;
    if (abs(aAvgErr/aj) > epsilon) {
        err++;
        printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(a[j]/aj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,aj,a[j],abs((aj-a[j])/aAvgErr));
                }
#endif
            }
        }
        printf(" For array a[], %d errors were found.\n",ierr);
    }
    if (abs(bAvgErr/bj) > epsilon) {
        err++;
        printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(b[j]/bj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,bj,b[j],abs((bj-b[j])/bAvgErr));
                }
#endif
            }
        }
        printf(" For array b[], %d errors were found.\n",ierr);
    }
    if (abs(cAvgErr/cj) > epsilon) {
        err++;
        printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(c[j]/cj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,cj,c[j],abs((cj-c[j])/cAvgErr));
                }
#endif
            }
        }
        printf(" For array c[], %d errors were found.\n",ierr);
    }
    if (err == 0) {
        printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
    }
#ifdef VERBOSE
    printf ("Results Validation Verbose Results: \n");
    printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
    printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
    printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED /* stubs for "tuned" versions of the kernels */ void tuned_STREAM_Copy() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; } void tuned_STREAM_Scale(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; } /* end of stubs for the "tuned" versions of the kernels */ #endif
coordinate_transformation_utilities.h
// | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors:
//
//

#ifndef KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H
#define KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H

// system includes

// external includes
#include "boost/numeric/ublas/matrix_proxy.hpp"

// kratos includes
#include "includes/define.h"
#include "includes/node.h"
#include "containers/variable.h"
#include "geometries/geometry.h"

namespace Kratos {

///@addtogroup KratosCore
///@{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/// A utility to rotate the local contributions of certain nodes to the system matrix,
/// which is required to apply slip conditions in arbitrary directions.
/** For every node carrying the selection flag, the velocity DOF block of the
 *  local system is expressed in a basis whose first axis is the nodal NORMAL,
 *  so that the slip condition reduces to fixing the first (normal) component.
 */
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
class CoordinateTransformationUtils {
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of CoordinateTransformationUtils
    KRATOS_CLASS_POINTER_DEFINITION(CoordinateTransformationUtils);

    typedef Node<3> NodeType;

    typedef Geometry< Node<3> > GeometryType;

    // typedef boost::numeric::ublas::matrix_row<TLocalMatrixType> LocalRowType;
    //
    // typedef boost::numeric::ublas::matrix_range<TLocalMatrixType> MatrixBlockType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /** @param DomainSize Number of space dimensions (2 or 3)
     * @param NumRowsPerNode Number of matrix or vector rows associated to each node.
     *        Velocity DOFs are assumed to be the first mDomainSize rows in each block of rows.
     * @param rSelectionFlag All nodes where the flag given by this argument is set to true
     *        will be transformed to a rotated coordinate system.
     */
    CoordinateTransformationUtils(const unsigned int DomainSize,
            const unsigned int NumRowsPerNode,
            const Kratos::Flags& rSelectionFlag = SLIP):
        mDomainSize(DomainSize),
        mBlockSize(NumRowsPerNode),
        mrFlag(rSelectionFlag)
    {}

    /// Destructor.
    virtual ~CoordinateTransformationUtils() {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Rotate the local system contributions so that they are oriented with each node's normal.
    /** Dispatches on whether the nodal block carries extra DOFs beyond velocity
     *  (monolithic: block size = dim + 1) or velocity only (fractional step).
     @param rLocalMatrix Local system matrix
     @param rLocalVector Local RHS vector
     @param rGeometry A reference to the element's (or condition's) geometry
     */
    virtual void Rotate(TLocalMatrixType& rLocalMatrix,
            TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        if(mBlockSize != mDomainSize) //Monolithic case
        {
            if(mDomainSize == 2) RotateAux<2,3>(rLocalMatrix,rLocalVector,rGeometry);
            if(mDomainSize == 3) RotateAux<3,4>(rLocalMatrix,rLocalVector,rGeometry);
        }
        else //fractional step case
        {
            if(mDomainSize == 2) RotateAuxPure<2>(rLocalMatrix,rLocalVector,rGeometry);
            if(mDomainSize == 3) RotateAuxPure<3>(rLocalMatrix,rLocalVector,rGeometry);
        }
    }

    /// RHS only version of Rotate
    /** Applies the per-node rotation operator to each flagged node's block of
     *  the local RHS vector, leaving unflagged blocks untouched. */
    virtual void Rotate(TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        //const unsigned int LocalSize = rLocalVector.size(); // We expect this to work both with elements (4 nodes) and conditions (3 nodes)

        unsigned int Index = 0; // NOTE(review): incremented but never read below

        if (rLocalVector.size() > 0)
        {
            if(mBlockSize != mDomainSize) //Monolithic case
            {
                for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j)
                {
                    if( this->IsSlip(rGeometry[j]) )
                    {
                        if(mDomainSize == 3)
                        {
                            // 3D monolithic: 4x4 rotation (3 velocity rows + 1 extra DOF)
                            array_1d<double,4> aux,aux1;
                            BoundedMatrix<double,4,4> rRot;
                            LocalRotationOperator3D<4>(rRot,rGeometry[j]);
                            for(unsigned int k=0; k<4; k++)
                                aux[k] = rLocalVector[j*mBlockSize+k];
                            noalias(aux1) = prod(rRot,aux);
                            for(unsigned int k=0; k<4; k++)
                                rLocalVector[j*mBlockSize+k] = aux1[k];
                        }
                        else
                        {
                            // 2D monolithic: 3x3 rotation (2 velocity rows + 1 extra DOF)
                            array_1d<double,3> aux,aux1;
                            BoundedMatrix<double,3,3> rRot;
                            LocalRotationOperator2D<3>(rRot,rGeometry[j]);
                            for(unsigned int k=0; k<3; k++)
                            {
                                aux[k] = rLocalVector[j*mBlockSize+k];
                            }
                            noalias(aux1) = prod(rRot,aux);
                            for(unsigned int k=0; k<3; k++)
                                rLocalVector[j*mBlockSize+k] = aux1[k];
                        }
                    }
                    Index += mBlockSize;
                }
            }
            else //fractional step case
            {
                for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j)
                {
                    if( this->IsSlip(rGeometry[j]) )
                    {
                        if(mDomainSize == 3)
                        {
                            array_1d<double,3> aux,aux1;
                            BoundedMatrix<double,3,3> rRot;
                            LocalRotationOperatorPure(rRot,rGeometry[j]);
                            for(unsigned int k=0; k<3; k++)
                                aux[k] = rLocalVector[j*mBlockSize+k];
                            noalias(aux1) = prod(rRot,aux);
                            for(unsigned int k=0; k<3; k++)
                                rLocalVector[j*mBlockSize+k] = aux1[k];
                        }
                        else
                        {
                            array_1d<double,2> aux,aux1;
                            BoundedMatrix<double,2,2> rRot;
                            LocalRotationOperatorPure(rRot,rGeometry[j]);
                            for(unsigned int k=0; k<2; k++)
                                aux[k] = rLocalVector[j*mBlockSize+k];
                            noalias(aux1) = prod(rRot,aux);
                            for(unsigned int k=0; k<2; k++)
                                rLocalVector[j*mBlockSize+k] = aux1[k];
                        }
                    }
                    Index += mBlockSize;
                }
            }
        }
    }

    /// Apply slip boundary conditions to the rotated local contributions.
    /** This function takes the local system contributions rotated so each node's
     velocities are expressed using a base oriented with its normal
     and imposes that the normal velocity is equal to the mesh velocity in
     the normal direction.
     */
    virtual void ApplySlipCondition(TLocalMatrixType& rLocalMatrix,
            TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        const unsigned int LocalSize = rLocalVector.size(); // We expect this to work both with elements (4 nodes) and conditions (3 nodes)

        if (LocalSize > 0)
        {
            for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
            {
                if( this->IsSlip(rGeometry[itNode]))
                {
                    // We fix the first dof (normal velocity) for each rotated block
                    unsigned int j = itNode * mBlockSize;

                    //const double k = rLocalMatrix(j,j)+rLocalMatrix(j,j+1)+rLocalMatrix(j,j+2);

                    // If the mesh is moving, we must impose v_normal = vmesh_normal
                    array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY);
                    VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY);
                    array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
                    this->Normalize(rN);

                    // Zero out row j and column j of the local matrix,
                    // keeping the diagonal term, which is set to 1 below.
                    for( unsigned int i = 0; i < j; ++i)// Skip term (i,i)
                    {
                        rLocalMatrix(i,j) = 0.0;
                        rLocalMatrix(j,i) = 0.0;
                    }
                    for( unsigned int i = j+1; i < LocalSize; ++i)
                    {
                        rLocalMatrix(i,j) = 0.0;
                        rLocalMatrix(j,i) = 0.0;
                    }

                    // Dirichlet row: 1 * (normal velocity) = relative mesh velocity along the normal
                    rLocalVector(j) = inner_prod(rN,VMesh);
                    rLocalMatrix(j,j) = 1.0;
                }
            }
        }
    }

    /// RHS only version of ApplySlipCondition
    virtual void ApplySlipCondition(TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        if (rLocalVector.size() > 0)
        {
            for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
            {
                if( this->IsSlip(rGeometry[itNode]) )
                {
                    // We fix the first dof (normal velocity) for each rotated block
                    unsigned int j = itNode * mBlockSize;

                    // If the mesh is moving, we must impose v_normal = vmesh_normal
                    array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY);
                    VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY);
                    array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
                    this->Normalize(rN);

                    rLocalVector[j] = inner_prod(rN,VMesh);
                }
            }
        }
    }

    /// Transform nodal velocities to the rotated coordinates (aligned with each node's normal)
    /** Rotates the VELOCITY solution-step value of every flagged node in place.
     *  In 2D only the first two velocity components are transformed. */
    virtual void RotateVelocities(ModelPart& rModelPart) const
    {
        TLocalVectorType Vel(mDomainSize);
        TLocalVectorType Tmp(mDomainSize);

        ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
        // Vel/Tmp are firstprivate so each thread gets its own scratch vectors
        #pragma omp parallel for firstprivate(Vel,Tmp)
        for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
        {
            ModelPart::NodeIterator itNode = it_begin+iii;
            if( this->IsSlip(*itNode) )
            {
                //this->RotationOperator<TLocalMatrixType>(Rotation,);
                if(mDomainSize == 3)
                {
                    BoundedMatrix<double,3,3> rRot;
                    LocalRotationOperatorPure(rRot,*itNode);

                    array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY);
                    for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i];
                    noalias(Tmp) = prod(rRot,Vel);
                    for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i];
                }
                else
                {
                    BoundedMatrix<double,2,2> rRot;
                    LocalRotationOperatorPure(rRot,*itNode);

                    array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY);
                    for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i];
                    noalias(Tmp) = prod(rRot,Vel);
                    for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i];
                }
            }
        }
    }

    /// Transform nodal velocities from the rotated system to the original one
    /** Inverse of RotateVelocities: uses the transpose of the (orthonormal)
     *  rotation operator to map velocities back to cartesian coordinates. */
    virtual void RecoverVelocities(ModelPart& rModelPart) const
    {
        TLocalVectorType Vel(mDomainSize);
        TLocalVectorType Tmp(mDomainSize);

        ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
        #pragma omp parallel for firstprivate(Vel,Tmp)
        for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
        {
            ModelPart::NodeIterator itNode = it_begin+iii;
            if( this->IsSlip(*itNode) )
            {
                if(mDomainSize == 3)
                {
                    BoundedMatrix<double,3,3> rRot;
                    LocalRotationOperatorPure(rRot,*itNode);

                    array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY);
                    for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i];
                    noalias(Tmp) = prod(trans(rRot),Vel);
                    for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i];
                }
                else
                {
                    BoundedMatrix<double,2,2> rRot;
                    LocalRotationOperatorPure(rRot,*itNode);

                    array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY);
                    for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i];
                    noalias(Tmp) = prod(trans(rRot),Vel);
                    for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i];
                }
            }
        }
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        std::stringstream buffer;
        buffer << "CoordinateTransformationUtils";
        return buffer.str();
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << "CoordinateTransformationUtils";
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const {}

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /// Rotate a monolithic local system (velocity block plus TBlockSize-TDim extra rows per node).
    /** Builds per-node rotation operators for flagged nodes and applies
     *  R * A * R^T block-wise to the matrix and R * b to the RHS. */
    template<unsigned int TDim, unsigned int TBlockSize, unsigned int TSkip = 0>
    void RotateAux(TLocalMatrixType& rLocalMatrix,
            TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        const unsigned int LocalSize = rLocalVector.size();

        unsigned int Index = 0; // NOTE(review): incremented but never read below
        int rotations_needed = 0;
        const unsigned int NumBlocks = LocalSize / TBlockSize;
        DenseVector<bool> NeedRotation( NumBlocks, false);

        std::vector< BoundedMatrix<double,TBlockSize,TBlockSize> > rRot(NumBlocks);
        for(unsigned int j = 0; j < NumBlocks; ++j)
        {
            if( this->IsSlip(rGeometry[j]) )
            {
                NeedRotation[j] = true;
                rotations_needed++;

                if (TDim == 2) LocalRotationOperator2D<TBlockSize,TSkip>(rRot[j],rGeometry[j]);
                else LocalRotationOperator3D<TBlockSize,TSkip>(rRot[j],rGeometry[j]);
            }

            Index += TBlockSize;
        }

        if(rotations_needed > 0)
        {
            BoundedMatrix<double,TBlockSize,TBlockSize> mat_block, tmp;
            array_1d<double,TBlockSize> aux, aux1;

            for(unsigned int i=0; i<NumBlocks; i++)
            {
                if(NeedRotation[i] == true)
                {
                    for(unsigned int j=0; j<NumBlocks; j++)
                    {
                        if(NeedRotation[j] == true)
                        {
                            // Both node blocks rotated: block <- R_i * block * R_j^T
                            ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                            noalias(tmp) = prod(mat_block,trans(rRot[j]));
                            noalias(mat_block) = prod(rRot[i],tmp);
                            WriteBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                        }
                        else
                        {
                            // Only the row block rotated: block <- R_i * block
                            ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                            noalias(tmp) = prod(rRot[i],mat_block);
                            WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                        }
                    }

                    // Rotate the RHS block: b_i <- R_i * b_i
                    for(unsigned int k=0; k<TBlockSize; k++)
                        aux[k] = rLocalVector[i*TBlockSize+k];
                    noalias(aux1) = prod(rRot[i],aux);
                    for(unsigned int k=0; k<TBlockSize; k++)
                        rLocalVector[i*TBlockSize+k] = aux1[k];
                }
                else
                {
                    for(unsigned int j=0; j<NumBlocks; j++)
                    {
                        if(NeedRotation[j] == true)
                        {
                            // Only the column block rotated: block <- block * R_j^T
                            ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                            noalias(tmp) = prod(mat_block,trans(rRot[j]));
                            WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                        }
                    }
                }
            }
        }
    }

    //to be used when there is only velocity (no additional pressure or other var block)
    /** Same block-wise R * A * R^T / R * b transformation as RotateAux, but the
     *  rotation operators are TDim x TDim (velocity-only blocks). */
    template<unsigned int TDim>
    void RotateAuxPure(TLocalMatrixType& rLocalMatrix,
            TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        const unsigned int LocalSize = rLocalVector.size();

        unsigned int Index = 0; // NOTE(review): incremented but never read below
        int rotations_needed = 0;
        const unsigned int NumBlocks = LocalSize / mBlockSize;
        DenseVector<bool> NeedRotation( NumBlocks, false);

        std::vector< BoundedMatrix<double,TDim,TDim> > rRot(NumBlocks);
        for(unsigned int j = 0; j < NumBlocks; ++j)
        {
            if( this->IsSlip(rGeometry[j]) )
            {
                NeedRotation[j] = true;
                rotations_needed++;

                LocalRotationOperatorPure(rRot[j],rGeometry[j]);
            }

            Index += mBlockSize;
        }

        if(rotations_needed > 0)
        {
            BoundedMatrix<double,TDim,TDim> mat_block, tmp;
            array_1d<double,TDim> aux, aux1;

            for(unsigned int i=0; i<NumBlocks; i++)
            {
                if(NeedRotation[i] == true)
                {
                    for(unsigned int j=0; j<NumBlocks; j++)
                    {
                        if(NeedRotation[j] == true)
                        {
                            ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                            noalias(tmp) = prod(mat_block,trans(rRot[j]));
                            noalias(mat_block) = prod(rRot[i],tmp);
                            WriteBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        }
                        else
                        {
                            ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                            noalias(tmp) = prod(rRot[i],mat_block);
                            WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        }
                    }

                    for(unsigned int k=0; k<TDim; k++)
                        aux[k] = rLocalVector[i*mBlockSize+k];
                    noalias(aux1) = prod(rRot[i],aux);
                    for(unsigned int k=0; k<TDim; k++)
                        rLocalVector[i*mBlockSize+k] = aux1[k];
                }
                else
                {
                    for(unsigned int j=0; j<NumBlocks; j++)
                    {
                        if(NeedRotation[j] == true)
                        {
                            ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                            noalias(tmp) = prod(mat_block,trans(rRot[j]));
                            WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        }
                    }
                }
            }
        }
    }

    /// Build a 2D rotation operator embedded in a TBlockSize x TBlockSize identity.
    /** Rows TSkip and TSkip+1 hold the (normalized normal, tangent) basis;
     *  the remaining rows/columns are left as identity. */
    template<unsigned int TBlockSize, unsigned int TSkip = 0>
    void LocalRotationOperator2D(BoundedMatrix<double,TBlockSize,TBlockSize>& rRot,
            GeometryType::PointType& rThisPoint) const
    {
        noalias(rRot) = IdentityMatrix(TBlockSize);

        // Get the normal evaluated at the node
        const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
        double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1];
        aux = sqrt(aux);

        rRot(TSkip  ,TSkip  ) = rNormal[0]/aux;
        rRot(TSkip  ,TSkip+1) = rNormal[1]/aux;
        rRot(TSkip+1,TSkip  ) = -rNormal[1]/aux;
        rRot(TSkip+1,TSkip+1) = rNormal[0]/aux;
    }

    /// Build a 3D rotation operator embedded in a TBlockSize x TBlockSize identity.
    /** First basis vector is the normalized nodal NORMAL; the tangents are
     *  obtained by projecting a cartesian axis onto the tangent plane and
     *  taking the cross product. */
    template<unsigned int TBlockSize, unsigned int TSkip = 0>
    void LocalRotationOperator3D(BoundedMatrix<double,TBlockSize,TBlockSize>& rRot,
            GeometryType::PointType& rThisPoint) const
    {
        noalias(rRot) = IdentityMatrix(TBlockSize);

        // Get the normal evaluated at the node
        const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
        double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2];
        aux = sqrt(aux);

        rRot(TSkip,TSkip  ) = rNormal[0]/aux;
        rRot(TSkip,TSkip+1) = rNormal[1]/aux;
        rRot(TSkip,TSkip+2) = rNormal[2]/aux;
        // Define the new coordinate system, where the first vector is aligned with the normal
        // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane
        array_1d<double,3> rT1;
        rT1(0) = 1.0;
        rT1(1) = 0.0;
        rT1(2) = 0.0;
        double dot = rRot(TSkip,TSkip);//this->Dot(rN,rT1);

        // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0
        // If this is the case, repeat the procedure using (0,1,0)
        if ( fabs(dot) > 0.99 )
        {
            rT1(0) = 0.0;
            rT1(1) = 1.0;
            rT1(2) = 0.0;

            dot = rRot(TSkip,TSkip+1); //this->Dot(rN,rT1);
        }

        // calculate projection and normalize
        rT1[0] -= dot*rRot(TSkip,TSkip);
        rT1[1] -= dot*rRot(TSkip,TSkip+1);
        rT1[2] -= dot*rRot(TSkip,TSkip+2);
        this->Normalize(rT1);
        rRot(TSkip+1,TSkip  ) = rT1[0];
        rRot(TSkip+1,TSkip+1) = rT1[1];
        rRot(TSkip+1,TSkip+2) = rT1[2];

        // The third base component is choosen as N x T1, which is normalized by construction
        rRot(TSkip+2,TSkip  ) = rRot(TSkip,TSkip+1)*rT1[2] - rRot(TSkip,TSkip+2)*rT1[1];
        rRot(TSkip+2,TSkip+1) = rRot(TSkip,TSkip+2)*rT1[0] - rRot(TSkip,TSkip  )*rT1[2];
        rRot(TSkip+2,TSkip+2) = rRot(TSkip,TSkip  )*rT1[1] - rRot(TSkip,TSkip+1)*rT1[0];
    }

    /// 3x3 velocity-only rotation operator (same construction as LocalRotationOperator3D).
    void LocalRotationOperatorPure(BoundedMatrix<double,3,3>& rRot,
            GeometryType::PointType& rThisPoint) const
    {
        // Get the normal evaluated at the node
        const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
        double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2];
        aux = sqrt(aux);

        rRot(0,0) = rNormal[0]/aux;
        rRot(0,1) = rNormal[1]/aux;
        rRot(0,2) = rNormal[2]/aux;
        // Define the new coordinate system, where the first vector is aligned with the normal
        // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane
        array_1d<double,3> rT1;
        rT1(0) = 1.0;
        rT1(1) = 0.0;
        rT1(2) = 0.0;
        double dot = rRot(0,0);//this->Dot(rN,rT1);

        // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0
        // If this is the case, repeat the procedure using (0,1,0)
        if ( fabs(dot) > 0.99 )
        {
            rT1(0) = 0.0;
            rT1(1) = 1.0;
            rT1(2) = 0.0;

            dot = rRot(0,1); //this->Dot(rN,rT1);
        }

        // calculate projection and normalize
        rT1[0] -= dot*rRot(0,0);
        rT1[1] -= dot*rRot(0,1);
        rT1[2] -= dot*rRot(0,2);
        this->Normalize(rT1);
        rRot(1,0) = rT1[0];
        rRot(1,1) = rT1[1];
        rRot(1,2) = rT1[2];

        // The third base component is choosen as N x T1, which is normalized by construction
        rRot(2,0) = rRot(0,1)*rT1[2] - rRot(0,2)*rT1[1];
        rRot(2,1) = rRot(0,2)*rT1[0] - rRot(0,0)*rT1[2];
        rRot(2,2) = rRot(0,0)*rT1[1] - rRot(0,1)*rT1[0];
    }

    /// 2x2 velocity-only rotation operator: rows are (normal, right-handed tangent).
    void LocalRotationOperatorPure(BoundedMatrix<double,2,2>& rRot,
            GeometryType::PointType& rThisPoint) const
    {
        // Get the normal evaluated at the node
        const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
        double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1];
        aux = sqrt(aux);

        rRot(0,0) = rNormal[0]/aux;
        rRot(0,1) = rNormal[1]/aux;
        rRot(1,0) = -rNormal[1]/aux;
        rRot(1,1) = rNormal[0]/aux;
    }

    /// True if the node carries the selection flag passed to the constructor.
    bool IsSlip(const Node<3>& rNode) const
    {
        return rNode.Is(mrFlag);
    }

    /// Normalize a vector.
    /**
     * @param rThis the vector
     * @return Original norm of the input vector
     */
    template< class TVectorType >
    double Normalize(TVectorType& rThis) const
    {
        double Norm = 0;
        for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent)
            Norm += (*iComponent)*(*iComponent);
        Norm = sqrt(Norm);
        // NOTE(review): a zero-length input produces division by zero here
        for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent)
            *iComponent /= Norm;
        return Norm;
    }

    ///@}
    ///@name Protected Access
    ///@{

    unsigned int GetDomainSize() const
    {
        return mDomainSize;
    }

    unsigned int GetBlockSize() const
    {
        return mBlockSize;
    }

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    /// Number of spatial dimensions
    const unsigned int mDomainSize;

    /// Number of matrix or vector rows associated to each node.
    /** @note Velocity Dofs are assumed to be the first mDomainSize rows. */
    const unsigned int mBlockSize;

    /// Flag identifying the nodes whose local contributions must be rotated.
    const Kratos::Flags& mrFlag;

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    // /// Compute a rotation matrix to transform values from the cartesian base to one oriented with the node's normal
    // /**
    // * The normal is read from solution step data NORMAL. Use NormalCalculationUtils::CalculateOnSimplex to
    // * obtain and store the nodal normal from the normals of the model's conditons.
    // * @param rRot The rotation matrix (output)
    // * @param rThisPoint The point used to orient the new coordinate system.
    // * @see NormalCalculationUtils
    // */
    // template<class TMatrixType>
    // void RotationOperator(TMatrixType& rRot,
    //         GeometryType::PointType& rThisPoint) const
    // {
    //     typedef boost::numeric::ublas::matrix_row<TMatrixType> ThisRowType;
    //     // Get the normal evaluated at the node
    //     const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    //
    //     if(mDomainSize == 3)
    //     {
    //         // Define the new coordinate system, where the first vector is aligned with the normal
    //         ThisRowType rN(rRot,0);
    //         for( unsigned int i = 0; i < 3; ++i)
    //             rN[i] = rNormal[i];
    //         this->Normalize(rN);
    //
    //         // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane
    //         ThisRowType rT1(rRot,1);
    //         rT1(0) = 1.0;
    //         rT1(1) = 0.0;
    //         rT1(2) = 0.0;
    //
    //         double dot = this->Dot(rN,rT1);
    //
    //         // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0
    //         // If this is the case, repeat the procedure using (0,1,0)
    //         if ( fabs(dot) > 0.99 )
    //         {
    //             rT1(0) = 0.0;
    //             rT1(1) = 1.0;
    //             rT1(2) = 0.0;
    //
    //             dot = this->Dot(rN,rT1);
    //         }
    //
    //         // calculate projection and normalize
    //         rT1 -= dot * rN;
    //         this->Normalize(rT1);
    //
    //         // The third base component is choosen as N x T1, which is normalized by construction
    //         ThisRowType rT2(rRot,2);
    //         rT2(0) = rN(1)*rT1(2) - rN(2)*rT1(1);
    //         rT2(1) = rN(2)*rT1(0) - rN(0)*rT1(2);
    //         rT2(2) = rN(0)*rT1(1) - rN(1)*rT1(0);
    //     }
    //     else //if(mDomainSize == 2)
    //     {
    //         /* The basis for the new coordinate system is (normal,tangent)
    //         Tangent vector is chosen (-normal_y, normal_x) so that the resulting base
    //         is right-handed.
    //         */
    //         ThisRowType rN(rRot,0);
    //         ThisRowType rT(rRot,1);
    //
    //         rN[0] = rNormal[0];
    //         rN[1] = rNormal[1];
    //         this->Normalize(rN);
    //         rT[0] = -rN[1];
    //         rT[1] = rN[0];
    //     }
    //
    // }

    /// Dot product of two vectors of equal length.
    template< class TVectorType >
    double Dot(const TVectorType& rV1,
            const TVectorType& rV2) const
    {
        double dot = 0.0;
        for( typename TVectorType::const_iterator iV1 = rV1.begin(),iV2 = rV2.begin();
                iV1 != rV1.end(); ++iV1, ++iV2)
        {
            dot += (*iV1) * (*iV2);
        }
        return dot;
    }

    /// Transform a local contribution from cartesian coordinates to rotated ones
    // void ApplyRotation(TLocalMatrixType& rMatrix,
    //         const TLocalMatrixType& rRotation) const
    // {
    //     // compute B = R*A*transpose(R)
    //     const unsigned int LocalSize = rMatrix.size1();
    //     const unsigned int NumBlocks = LocalSize / mBlockSize;
    //     //TLocalMatrixType Tmp = ZeroMatrix(LocalSize,LocalSize);
    //     /*
    //     for (unsigned int iBlock = 0; iBlock < NumBlocks; iBlock++)
    //     {
    //         for (unsigned int jBlock = 0; jBlock < NumBlocks; jBlock++)
    //         {
    //             for (unsigned int i = iBlock*mBlockSize; i < (iBlock+1)*mBlockSize; i++)
    //             {
    //                 for(unsigned int j = jBlock*mBlockSize; j < (jBlock+1)*mBlockSize; j++)
    //                 {
    //                     double& tij = Tmp(i,j);
    //                     for(unsigned int k = iBlock*mBlockSize; k < (iBlock+1)*mBlockSize; k++)
    //                     {
    //                         for(unsigned int l = jBlock*mBlockSize; l < (jBlock+1)*mBlockSize; l++)
    //                         {
    //                             tij += rRotation(i,k)*rMatrix(k,l)*rRotation(j,l);
    //                         }
    //                     }
    //                 }
    //             }
    //         }
    //     }*/
    //
    //     Matrix Tmp = prod(rMatrix,trans(rRotation));
    //     noalias(rMatrix) = prod(rRotation,Tmp);
    //
    //     // noalias(rMatrix) = Tmp;
    // }

    //auxiliary functions

    /// Copy a TBlockSize x TBlockSize block out of origin, starting at (Ibegin,Jbegin).
    template< unsigned int TBlockSize >
    void ReadBlockMatrix( BoundedMatrix<double,TBlockSize, TBlockSize>& block,
            const Matrix& origin,
            const unsigned int Ibegin,
            const unsigned int Jbegin) const
    {
        for(unsigned int i=0; i<TBlockSize; i++)
        {
            for(unsigned int j=0; j<TBlockSize; j++)
            {
                block(i,j) = origin(Ibegin+i, Jbegin+j);
            }
        }
    }

    /// Write a TBlockSize x TBlockSize block into destination, starting at (Ibegin,Jbegin).
    template< unsigned int TBlockSize >
    void WriteBlockMatrix( const BoundedMatrix<double,TBlockSize, TBlockSize>& block,
            Matrix& destination,
            const unsigned int Ibegin,
            const unsigned int Jbegin) const
    {
        for(unsigned int i=0; i<TBlockSize; i++)
        {
            for(unsigned int j=0; j<TBlockSize; j++)
            {
                destination(Ibegin+i, Jbegin+j) = block(i,j);
            }
        }
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    // NOTE(review): declared private to forbid copying; body is empty and does
    // not return *this, so it must never actually be called.
    CoordinateTransformationUtils& operator=(CoordinateTransformationUtils const& rOther) {}

    /// Copy constructor.
    CoordinateTransformationUtils(CoordinateTransformationUtils const& rOther) {}

    ///@}
};

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
inline std::istream& operator >>(std::istream& rIStream,
        CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) {
    return rIStream;
}

/// output stream function
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
inline std::ostream& operator <<(std::ostream& rOStream,
        const CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) {
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}

///@}

///@} addtogroup block

}

#endif // KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H
pl10-1.c
#include <omp.h>
#include <stdio.h>

/* Prints a greeting from every OpenMP thread; thread 0 additionally reports
 * the number of available processors and the size of the team. Finally the
 * wall-clock time spent between the two omp_get_wtime() calls is printed. */
int main()
{
    double T1 = omp_get_wtime();
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        printf("Hello, world! from thread %d\n", tid);
        if (tid == 0)
        {
            printf("cores: %d\n", omp_get_num_procs());
            printf("threads: %d\n", omp_get_num_threads());
        }
    }
    double T2 = omp_get_wtime();
    /* BUG FIX: T2 - T1 is a double; printing it with "%d" is undefined
     * behaviour (and typically garbage output). Use "%f" instead. */
    printf("That's all, folks! Time %f \n", T2 - T1);
    return 0;
}
8855.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ for (i = 0; i < _PB_N; i++) { #pragma omp parallel for schedule(dynamic, 1) num_threads(1) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp parallel for schedule(dynamic, 1) num_threads(1) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
acado_solver.c
/* * This file was auto-generated using the ACADO Toolkit. * * While ACADO Toolkit is free software released under the terms of * the GNU Lesser General Public License (LGPL), the generated code * as such remains the property of the user who used ACADO Toolkit * to generate this code. In particular, user dependent data of the code * do not inherit the GNU LGPL license. On the other hand, parts of the * generated code that are a direct copy of source code from the * ACADO Toolkit or the software tools it is based on, remain, as derived * work, automatically covered by the LGPL license. * * ACADO Toolkit is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include "acado_common.h" /******************************************************************************/ /* */ /* ACADO code generation */ /* */ /******************************************************************************/ /** Row vector of size: 164 */ real_t state[ 164 ]; int acado_modelSimulation( ) { int ret; int lRun1; ret = 0; #pragma omp parallel for private(lRun1, state) shared(acadoWorkspace, acadoVariables) for (lRun1 = 0; lRun1 < 20; ++lRun1) { state[0] = acadoVariables.x[lRun1 * 10]; state[1] = acadoVariables.x[lRun1 * 10 + 1]; state[2] = acadoVariables.x[lRun1 * 10 + 2]; state[3] = acadoVariables.x[lRun1 * 10 + 3]; state[4] = acadoVariables.x[lRun1 * 10 + 4]; state[5] = acadoVariables.x[lRun1 * 10 + 5]; state[6] = acadoVariables.x[lRun1 * 10 + 6]; state[7] = acadoVariables.x[lRun1 * 10 + 7]; state[8] = acadoVariables.x[lRun1 * 10 + 8]; state[9] = acadoVariables.x[lRun1 * 10 + 9]; state[150] = acadoVariables.u[lRun1 * 4]; state[151] = acadoVariables.u[lRun1 * 4 + 1]; state[152] = acadoVariables.u[lRun1 * 4 + 2]; state[153] = acadoVariables.u[lRun1 * 4 + 3]; state[154] = acadoVariables.od[lRun1 * 10]; state[155] = acadoVariables.od[lRun1 * 10 + 1]; state[156] = 
acadoVariables.od[lRun1 * 10 + 2]; state[157] = acadoVariables.od[lRun1 * 10 + 3]; state[158] = acadoVariables.od[lRun1 * 10 + 4]; state[159] = acadoVariables.od[lRun1 * 10 + 5]; state[160] = acadoVariables.od[lRun1 * 10 + 6]; state[161] = acadoVariables.od[lRun1 * 10 + 7]; state[162] = acadoVariables.od[lRun1 * 10 + 8]; state[163] = acadoVariables.od[lRun1 * 10 + 9]; ret = acado_integrate(state, 1); acadoWorkspace.d[lRun1 * 10] = state[0] - acadoVariables.x[lRun1 * 10 + 10]; acadoWorkspace.d[lRun1 * 10 + 1] = state[1] - acadoVariables.x[lRun1 * 10 + 11]; acadoWorkspace.d[lRun1 * 10 + 2] = state[2] - acadoVariables.x[lRun1 * 10 + 12]; acadoWorkspace.d[lRun1 * 10 + 3] = state[3] - acadoVariables.x[lRun1 * 10 + 13]; acadoWorkspace.d[lRun1 * 10 + 4] = state[4] - acadoVariables.x[lRun1 * 10 + 14]; acadoWorkspace.d[lRun1 * 10 + 5] = state[5] - acadoVariables.x[lRun1 * 10 + 15]; acadoWorkspace.d[lRun1 * 10 + 6] = state[6] - acadoVariables.x[lRun1 * 10 + 16]; acadoWorkspace.d[lRun1 * 10 + 7] = state[7] - acadoVariables.x[lRun1 * 10 + 17]; acadoWorkspace.d[lRun1 * 10 + 8] = state[8] - acadoVariables.x[lRun1 * 10 + 18]; acadoWorkspace.d[lRun1 * 10 + 9] = state[9] - acadoVariables.x[lRun1 * 10 + 19]; acadoWorkspace.evGx[lRun1 * 100] = state[10]; acadoWorkspace.evGx[lRun1 * 100 + 1] = state[11]; acadoWorkspace.evGx[lRun1 * 100 + 2] = state[12]; acadoWorkspace.evGx[lRun1 * 100 + 3] = state[13]; acadoWorkspace.evGx[lRun1 * 100 + 4] = state[14]; acadoWorkspace.evGx[lRun1 * 100 + 5] = state[15]; acadoWorkspace.evGx[lRun1 * 100 + 6] = state[16]; acadoWorkspace.evGx[lRun1 * 100 + 7] = state[17]; acadoWorkspace.evGx[lRun1 * 100 + 8] = state[18]; acadoWorkspace.evGx[lRun1 * 100 + 9] = state[19]; acadoWorkspace.evGx[lRun1 * 100 + 10] = state[20]; acadoWorkspace.evGx[lRun1 * 100 + 11] = state[21]; acadoWorkspace.evGx[lRun1 * 100 + 12] = state[22]; acadoWorkspace.evGx[lRun1 * 100 + 13] = state[23]; acadoWorkspace.evGx[lRun1 * 100 + 14] = state[24]; acadoWorkspace.evGx[lRun1 * 100 + 15] 
= state[25]; acadoWorkspace.evGx[lRun1 * 100 + 16] = state[26]; acadoWorkspace.evGx[lRun1 * 100 + 17] = state[27]; acadoWorkspace.evGx[lRun1 * 100 + 18] = state[28]; acadoWorkspace.evGx[lRun1 * 100 + 19] = state[29]; acadoWorkspace.evGx[lRun1 * 100 + 20] = state[30]; acadoWorkspace.evGx[lRun1 * 100 + 21] = state[31]; acadoWorkspace.evGx[lRun1 * 100 + 22] = state[32]; acadoWorkspace.evGx[lRun1 * 100 + 23] = state[33]; acadoWorkspace.evGx[lRun1 * 100 + 24] = state[34]; acadoWorkspace.evGx[lRun1 * 100 + 25] = state[35]; acadoWorkspace.evGx[lRun1 * 100 + 26] = state[36]; acadoWorkspace.evGx[lRun1 * 100 + 27] = state[37]; acadoWorkspace.evGx[lRun1 * 100 + 28] = state[38]; acadoWorkspace.evGx[lRun1 * 100 + 29] = state[39]; acadoWorkspace.evGx[lRun1 * 100 + 30] = state[40]; acadoWorkspace.evGx[lRun1 * 100 + 31] = state[41]; acadoWorkspace.evGx[lRun1 * 100 + 32] = state[42]; acadoWorkspace.evGx[lRun1 * 100 + 33] = state[43]; acadoWorkspace.evGx[lRun1 * 100 + 34] = state[44]; acadoWorkspace.evGx[lRun1 * 100 + 35] = state[45]; acadoWorkspace.evGx[lRun1 * 100 + 36] = state[46]; acadoWorkspace.evGx[lRun1 * 100 + 37] = state[47]; acadoWorkspace.evGx[lRun1 * 100 + 38] = state[48]; acadoWorkspace.evGx[lRun1 * 100 + 39] = state[49]; acadoWorkspace.evGx[lRun1 * 100 + 40] = state[50]; acadoWorkspace.evGx[lRun1 * 100 + 41] = state[51]; acadoWorkspace.evGx[lRun1 * 100 + 42] = state[52]; acadoWorkspace.evGx[lRun1 * 100 + 43] = state[53]; acadoWorkspace.evGx[lRun1 * 100 + 44] = state[54]; acadoWorkspace.evGx[lRun1 * 100 + 45] = state[55]; acadoWorkspace.evGx[lRun1 * 100 + 46] = state[56]; acadoWorkspace.evGx[lRun1 * 100 + 47] = state[57]; acadoWorkspace.evGx[lRun1 * 100 + 48] = state[58]; acadoWorkspace.evGx[lRun1 * 100 + 49] = state[59]; acadoWorkspace.evGx[lRun1 * 100 + 50] = state[60]; acadoWorkspace.evGx[lRun1 * 100 + 51] = state[61]; acadoWorkspace.evGx[lRun1 * 100 + 52] = state[62]; acadoWorkspace.evGx[lRun1 * 100 + 53] = state[63]; acadoWorkspace.evGx[lRun1 * 100 + 54] = 
state[64]; acadoWorkspace.evGx[lRun1 * 100 + 55] = state[65]; acadoWorkspace.evGx[lRun1 * 100 + 56] = state[66]; acadoWorkspace.evGx[lRun1 * 100 + 57] = state[67]; acadoWorkspace.evGx[lRun1 * 100 + 58] = state[68]; acadoWorkspace.evGx[lRun1 * 100 + 59] = state[69]; acadoWorkspace.evGx[lRun1 * 100 + 60] = state[70]; acadoWorkspace.evGx[lRun1 * 100 + 61] = state[71]; acadoWorkspace.evGx[lRun1 * 100 + 62] = state[72]; acadoWorkspace.evGx[lRun1 * 100 + 63] = state[73]; acadoWorkspace.evGx[lRun1 * 100 + 64] = state[74]; acadoWorkspace.evGx[lRun1 * 100 + 65] = state[75]; acadoWorkspace.evGx[lRun1 * 100 + 66] = state[76]; acadoWorkspace.evGx[lRun1 * 100 + 67] = state[77]; acadoWorkspace.evGx[lRun1 * 100 + 68] = state[78]; acadoWorkspace.evGx[lRun1 * 100 + 69] = state[79]; acadoWorkspace.evGx[lRun1 * 100 + 70] = state[80]; acadoWorkspace.evGx[lRun1 * 100 + 71] = state[81]; acadoWorkspace.evGx[lRun1 * 100 + 72] = state[82]; acadoWorkspace.evGx[lRun1 * 100 + 73] = state[83]; acadoWorkspace.evGx[lRun1 * 100 + 74] = state[84]; acadoWorkspace.evGx[lRun1 * 100 + 75] = state[85]; acadoWorkspace.evGx[lRun1 * 100 + 76] = state[86]; acadoWorkspace.evGx[lRun1 * 100 + 77] = state[87]; acadoWorkspace.evGx[lRun1 * 100 + 78] = state[88]; acadoWorkspace.evGx[lRun1 * 100 + 79] = state[89]; acadoWorkspace.evGx[lRun1 * 100 + 80] = state[90]; acadoWorkspace.evGx[lRun1 * 100 + 81] = state[91]; acadoWorkspace.evGx[lRun1 * 100 + 82] = state[92]; acadoWorkspace.evGx[lRun1 * 100 + 83] = state[93]; acadoWorkspace.evGx[lRun1 * 100 + 84] = state[94]; acadoWorkspace.evGx[lRun1 * 100 + 85] = state[95]; acadoWorkspace.evGx[lRun1 * 100 + 86] = state[96]; acadoWorkspace.evGx[lRun1 * 100 + 87] = state[97]; acadoWorkspace.evGx[lRun1 * 100 + 88] = state[98]; acadoWorkspace.evGx[lRun1 * 100 + 89] = state[99]; acadoWorkspace.evGx[lRun1 * 100 + 90] = state[100]; acadoWorkspace.evGx[lRun1 * 100 + 91] = state[101]; acadoWorkspace.evGx[lRun1 * 100 + 92] = state[102]; acadoWorkspace.evGx[lRun1 * 100 + 93] = 
state[103]; acadoWorkspace.evGx[lRun1 * 100 + 94] = state[104]; acadoWorkspace.evGx[lRun1 * 100 + 95] = state[105]; acadoWorkspace.evGx[lRun1 * 100 + 96] = state[106]; acadoWorkspace.evGx[lRun1 * 100 + 97] = state[107]; acadoWorkspace.evGx[lRun1 * 100 + 98] = state[108]; acadoWorkspace.evGx[lRun1 * 100 + 99] = state[109]; acadoWorkspace.evGu[lRun1 * 40] = state[110]; acadoWorkspace.evGu[lRun1 * 40 + 1] = state[111]; acadoWorkspace.evGu[lRun1 * 40 + 2] = state[112]; acadoWorkspace.evGu[lRun1 * 40 + 3] = state[113]; acadoWorkspace.evGu[lRun1 * 40 + 4] = state[114]; acadoWorkspace.evGu[lRun1 * 40 + 5] = state[115]; acadoWorkspace.evGu[lRun1 * 40 + 6] = state[116]; acadoWorkspace.evGu[lRun1 * 40 + 7] = state[117]; acadoWorkspace.evGu[lRun1 * 40 + 8] = state[118]; acadoWorkspace.evGu[lRun1 * 40 + 9] = state[119]; acadoWorkspace.evGu[lRun1 * 40 + 10] = state[120]; acadoWorkspace.evGu[lRun1 * 40 + 11] = state[121]; acadoWorkspace.evGu[lRun1 * 40 + 12] = state[122]; acadoWorkspace.evGu[lRun1 * 40 + 13] = state[123]; acadoWorkspace.evGu[lRun1 * 40 + 14] = state[124]; acadoWorkspace.evGu[lRun1 * 40 + 15] = state[125]; acadoWorkspace.evGu[lRun1 * 40 + 16] = state[126]; acadoWorkspace.evGu[lRun1 * 40 + 17] = state[127]; acadoWorkspace.evGu[lRun1 * 40 + 18] = state[128]; acadoWorkspace.evGu[lRun1 * 40 + 19] = state[129]; acadoWorkspace.evGu[lRun1 * 40 + 20] = state[130]; acadoWorkspace.evGu[lRun1 * 40 + 21] = state[131]; acadoWorkspace.evGu[lRun1 * 40 + 22] = state[132]; acadoWorkspace.evGu[lRun1 * 40 + 23] = state[133]; acadoWorkspace.evGu[lRun1 * 40 + 24] = state[134]; acadoWorkspace.evGu[lRun1 * 40 + 25] = state[135]; acadoWorkspace.evGu[lRun1 * 40 + 26] = state[136]; acadoWorkspace.evGu[lRun1 * 40 + 27] = state[137]; acadoWorkspace.evGu[lRun1 * 40 + 28] = state[138]; acadoWorkspace.evGu[lRun1 * 40 + 29] = state[139]; acadoWorkspace.evGu[lRun1 * 40 + 30] = state[140]; acadoWorkspace.evGu[lRun1 * 40 + 31] = state[141]; acadoWorkspace.evGu[lRun1 * 40 + 32] = state[142]; 
acadoWorkspace.evGu[lRun1 * 40 + 33] = state[143]; acadoWorkspace.evGu[lRun1 * 40 + 34] = state[144]; acadoWorkspace.evGu[lRun1 * 40 + 35] = state[145]; acadoWorkspace.evGu[lRun1 * 40 + 36] = state[146]; acadoWorkspace.evGu[lRun1 * 40 + 37] = state[147]; acadoWorkspace.evGu[lRun1 * 40 + 38] = state[148]; acadoWorkspace.evGu[lRun1 * 40 + 39] = state[149]; } return ret; } void acado_evaluateLSQ(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 10; /* Compute outputs: */ out[0] = xd[0]; out[1] = xd[1]; out[2] = xd[2]; out[3] = xd[3]; out[4] = xd[4]; out[5] = xd[5]; out[6] = xd[6]; out[7] = xd[7]; out[8] = xd[8]; out[9] = xd[9]; out[10] = u[0]; out[11] = u[1]; out[12] = u[2]; out[13] = u[3]; } void acado_evaluateLSQEndTerm(const real_t* in, real_t* out) { const real_t* xd = in; /* Compute outputs: */ out[0] = xd[0]; out[1] = xd[1]; out[2] = xd[2]; out[3] = xd[3]; out[4] = xd[4]; out[5] = xd[5]; out[6] = xd[6]; out[7] = xd[7]; out[8] = xd[8]; out[9] = xd[9]; } void acado_evaluatePathConstraints(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 10; /* Vector of auxiliary variables; number of elements: 21. 
*/ real_t* a = acadoWorkspace.conAuxVar; /* Compute intermediate quantities: */ a[0] = ((u[1])*(u[1])); a[1] = ((u[2])*(u[2])); a[2] = (sqrt(((a[0]+a[1])+(real_t)(1.0000000000000000e-04)))); a[3] = (real_t)(0.0000000000000000e+00); a[4] = (real_t)(0.0000000000000000e+00); a[5] = (real_t)(0.0000000000000000e+00); a[6] = (real_t)(0.0000000000000000e+00); a[7] = (real_t)(0.0000000000000000e+00); a[8] = (real_t)(0.0000000000000000e+00); a[9] = (real_t)(0.0000000000000000e+00); a[10] = (real_t)(0.0000000000000000e+00); a[11] = (real_t)(0.0000000000000000e+00); a[12] = (real_t)(0.0000000000000000e+00); a[13] = (real_t)(1.0000000000000000e+00); a[14] = ((real_t)(2.0000000000000000e+00)*u[1]); a[15] = (1.0/sqrt(((a[0]+a[1])+(real_t)(1.0000000000000000e-04)))); a[16] = (a[15]*(real_t)(5.0000000000000000e-01)); a[17] = (a[14]*a[16]); a[18] = ((real_t)(2.0000000000000000e+00)*u[2]); a[19] = (a[18]*a[16]); a[20] = (real_t)(0.0000000000000000e+00); /* Compute outputs: */ out[0] = (u[0]+a[2]); out[1] = a[3]; out[2] = a[4]; out[3] = a[5]; out[4] = a[6]; out[5] = a[7]; out[6] = a[8]; out[7] = a[9]; out[8] = a[10]; out[9] = a[11]; out[10] = a[12]; out[11] = a[13]; out[12] = a[17]; out[13] = a[19]; out[14] = a[20]; } void acado_setObjQ1Q2( real_t* const tmpObjS, real_t* const tmpQ1, real_t* const tmpQ2 ) { tmpQ2[0] = +tmpObjS[0]; tmpQ2[1] = +tmpObjS[1]; tmpQ2[2] = +tmpObjS[2]; tmpQ2[3] = +tmpObjS[3]; tmpQ2[4] = +tmpObjS[4]; tmpQ2[5] = +tmpObjS[5]; tmpQ2[6] = +tmpObjS[6]; tmpQ2[7] = +tmpObjS[7]; tmpQ2[8] = +tmpObjS[8]; tmpQ2[9] = +tmpObjS[9]; tmpQ2[10] = +tmpObjS[10]; tmpQ2[11] = +tmpObjS[11]; tmpQ2[12] = +tmpObjS[12]; tmpQ2[13] = +tmpObjS[13]; tmpQ2[14] = +tmpObjS[14]; tmpQ2[15] = +tmpObjS[15]; tmpQ2[16] = +tmpObjS[16]; tmpQ2[17] = +tmpObjS[17]; tmpQ2[18] = +tmpObjS[18]; tmpQ2[19] = +tmpObjS[19]; tmpQ2[20] = +tmpObjS[20]; tmpQ2[21] = +tmpObjS[21]; tmpQ2[22] = +tmpObjS[22]; tmpQ2[23] = +tmpObjS[23]; tmpQ2[24] = +tmpObjS[24]; tmpQ2[25] = +tmpObjS[25]; tmpQ2[26] = +tmpObjS[26]; 
tmpQ2[27] = +tmpObjS[27]; tmpQ2[28] = +tmpObjS[28]; tmpQ2[29] = +tmpObjS[29]; tmpQ2[30] = +tmpObjS[30]; tmpQ2[31] = +tmpObjS[31]; tmpQ2[32] = +tmpObjS[32]; tmpQ2[33] = +tmpObjS[33]; tmpQ2[34] = +tmpObjS[34]; tmpQ2[35] = +tmpObjS[35]; tmpQ2[36] = +tmpObjS[36]; tmpQ2[37] = +tmpObjS[37]; tmpQ2[38] = +tmpObjS[38]; tmpQ2[39] = +tmpObjS[39]; tmpQ2[40] = +tmpObjS[40]; tmpQ2[41] = +tmpObjS[41]; tmpQ2[42] = +tmpObjS[42]; tmpQ2[43] = +tmpObjS[43]; tmpQ2[44] = +tmpObjS[44]; tmpQ2[45] = +tmpObjS[45]; tmpQ2[46] = +tmpObjS[46]; tmpQ2[47] = +tmpObjS[47]; tmpQ2[48] = +tmpObjS[48]; tmpQ2[49] = +tmpObjS[49]; tmpQ2[50] = +tmpObjS[50]; tmpQ2[51] = +tmpObjS[51]; tmpQ2[52] = +tmpObjS[52]; tmpQ2[53] = +tmpObjS[53]; tmpQ2[54] = +tmpObjS[54]; tmpQ2[55] = +tmpObjS[55]; tmpQ2[56] = +tmpObjS[56]; tmpQ2[57] = +tmpObjS[57]; tmpQ2[58] = +tmpObjS[58]; tmpQ2[59] = +tmpObjS[59]; tmpQ2[60] = +tmpObjS[60]; tmpQ2[61] = +tmpObjS[61]; tmpQ2[62] = +tmpObjS[62]; tmpQ2[63] = +tmpObjS[63]; tmpQ2[64] = +tmpObjS[64]; tmpQ2[65] = +tmpObjS[65]; tmpQ2[66] = +tmpObjS[66]; tmpQ2[67] = +tmpObjS[67]; tmpQ2[68] = +tmpObjS[68]; tmpQ2[69] = +tmpObjS[69]; tmpQ2[70] = +tmpObjS[70]; tmpQ2[71] = +tmpObjS[71]; tmpQ2[72] = +tmpObjS[72]; tmpQ2[73] = +tmpObjS[73]; tmpQ2[74] = +tmpObjS[74]; tmpQ2[75] = +tmpObjS[75]; tmpQ2[76] = +tmpObjS[76]; tmpQ2[77] = +tmpObjS[77]; tmpQ2[78] = +tmpObjS[78]; tmpQ2[79] = +tmpObjS[79]; tmpQ2[80] = +tmpObjS[80]; tmpQ2[81] = +tmpObjS[81]; tmpQ2[82] = +tmpObjS[82]; tmpQ2[83] = +tmpObjS[83]; tmpQ2[84] = +tmpObjS[84]; tmpQ2[85] = +tmpObjS[85]; tmpQ2[86] = +tmpObjS[86]; tmpQ2[87] = +tmpObjS[87]; tmpQ2[88] = +tmpObjS[88]; tmpQ2[89] = +tmpObjS[89]; tmpQ2[90] = +tmpObjS[90]; tmpQ2[91] = +tmpObjS[91]; tmpQ2[92] = +tmpObjS[92]; tmpQ2[93] = +tmpObjS[93]; tmpQ2[94] = +tmpObjS[94]; tmpQ2[95] = +tmpObjS[95]; tmpQ2[96] = +tmpObjS[96]; tmpQ2[97] = +tmpObjS[97]; tmpQ2[98] = +tmpObjS[98]; tmpQ2[99] = +tmpObjS[99]; tmpQ2[100] = +tmpObjS[100]; tmpQ2[101] = +tmpObjS[101]; tmpQ2[102] = +tmpObjS[102]; tmpQ2[103] = 
+tmpObjS[103]; tmpQ2[104] = +tmpObjS[104]; tmpQ2[105] = +tmpObjS[105]; tmpQ2[106] = +tmpObjS[106]; tmpQ2[107] = +tmpObjS[107]; tmpQ2[108] = +tmpObjS[108]; tmpQ2[109] = +tmpObjS[109]; tmpQ2[110] = +tmpObjS[110]; tmpQ2[111] = +tmpObjS[111]; tmpQ2[112] = +tmpObjS[112]; tmpQ2[113] = +tmpObjS[113]; tmpQ2[114] = +tmpObjS[114]; tmpQ2[115] = +tmpObjS[115]; tmpQ2[116] = +tmpObjS[116]; tmpQ2[117] = +tmpObjS[117]; tmpQ2[118] = +tmpObjS[118]; tmpQ2[119] = +tmpObjS[119]; tmpQ2[120] = +tmpObjS[120]; tmpQ2[121] = +tmpObjS[121]; tmpQ2[122] = +tmpObjS[122]; tmpQ2[123] = +tmpObjS[123]; tmpQ2[124] = +tmpObjS[124]; tmpQ2[125] = +tmpObjS[125]; tmpQ2[126] = +tmpObjS[126]; tmpQ2[127] = +tmpObjS[127]; tmpQ2[128] = +tmpObjS[128]; tmpQ2[129] = +tmpObjS[129]; tmpQ2[130] = +tmpObjS[130]; tmpQ2[131] = +tmpObjS[131]; tmpQ2[132] = +tmpObjS[132]; tmpQ2[133] = +tmpObjS[133]; tmpQ2[134] = +tmpObjS[134]; tmpQ2[135] = +tmpObjS[135]; tmpQ2[136] = +tmpObjS[136]; tmpQ2[137] = +tmpObjS[137]; tmpQ2[138] = +tmpObjS[138]; tmpQ2[139] = +tmpObjS[139]; tmpQ1[0] = + tmpQ2[0]; tmpQ1[1] = + tmpQ2[1]; tmpQ1[2] = + tmpQ2[2]; tmpQ1[3] = + tmpQ2[3]; tmpQ1[4] = + tmpQ2[4]; tmpQ1[5] = + tmpQ2[5]; tmpQ1[6] = + tmpQ2[6]; tmpQ1[7] = + tmpQ2[7]; tmpQ1[8] = + tmpQ2[8]; tmpQ1[9] = + tmpQ2[9]; tmpQ1[10] = + tmpQ2[14]; tmpQ1[11] = + tmpQ2[15]; tmpQ1[12] = + tmpQ2[16]; tmpQ1[13] = + tmpQ2[17]; tmpQ1[14] = + tmpQ2[18]; tmpQ1[15] = + tmpQ2[19]; tmpQ1[16] = + tmpQ2[20]; tmpQ1[17] = + tmpQ2[21]; tmpQ1[18] = + tmpQ2[22]; tmpQ1[19] = + tmpQ2[23]; tmpQ1[20] = + tmpQ2[28]; tmpQ1[21] = + tmpQ2[29]; tmpQ1[22] = + tmpQ2[30]; tmpQ1[23] = + tmpQ2[31]; tmpQ1[24] = + tmpQ2[32]; tmpQ1[25] = + tmpQ2[33]; tmpQ1[26] = + tmpQ2[34]; tmpQ1[27] = + tmpQ2[35]; tmpQ1[28] = + tmpQ2[36]; tmpQ1[29] = + tmpQ2[37]; tmpQ1[30] = + tmpQ2[42]; tmpQ1[31] = + tmpQ2[43]; tmpQ1[32] = + tmpQ2[44]; tmpQ1[33] = + tmpQ2[45]; tmpQ1[34] = + tmpQ2[46]; tmpQ1[35] = + tmpQ2[47]; tmpQ1[36] = + tmpQ2[48]; tmpQ1[37] = + tmpQ2[49]; tmpQ1[38] = + tmpQ2[50]; tmpQ1[39] = + 
tmpQ2[51]; tmpQ1[40] = + tmpQ2[56]; tmpQ1[41] = + tmpQ2[57]; tmpQ1[42] = + tmpQ2[58]; tmpQ1[43] = + tmpQ2[59]; tmpQ1[44] = + tmpQ2[60]; tmpQ1[45] = + tmpQ2[61]; tmpQ1[46] = + tmpQ2[62]; tmpQ1[47] = + tmpQ2[63]; tmpQ1[48] = + tmpQ2[64]; tmpQ1[49] = + tmpQ2[65]; tmpQ1[50] = + tmpQ2[70]; tmpQ1[51] = + tmpQ2[71]; tmpQ1[52] = + tmpQ2[72]; tmpQ1[53] = + tmpQ2[73]; tmpQ1[54] = + tmpQ2[74]; tmpQ1[55] = + tmpQ2[75]; tmpQ1[56] = + tmpQ2[76]; tmpQ1[57] = + tmpQ2[77]; tmpQ1[58] = + tmpQ2[78]; tmpQ1[59] = + tmpQ2[79]; tmpQ1[60] = + tmpQ2[84]; tmpQ1[61] = + tmpQ2[85]; tmpQ1[62] = + tmpQ2[86]; tmpQ1[63] = + tmpQ2[87]; tmpQ1[64] = + tmpQ2[88]; tmpQ1[65] = + tmpQ2[89]; tmpQ1[66] = + tmpQ2[90]; tmpQ1[67] = + tmpQ2[91]; tmpQ1[68] = + tmpQ2[92]; tmpQ1[69] = + tmpQ2[93]; tmpQ1[70] = + tmpQ2[98]; tmpQ1[71] = + tmpQ2[99]; tmpQ1[72] = + tmpQ2[100]; tmpQ1[73] = + tmpQ2[101]; tmpQ1[74] = + tmpQ2[102]; tmpQ1[75] = + tmpQ2[103]; tmpQ1[76] = + tmpQ2[104]; tmpQ1[77] = + tmpQ2[105]; tmpQ1[78] = + tmpQ2[106]; tmpQ1[79] = + tmpQ2[107]; tmpQ1[80] = + tmpQ2[112]; tmpQ1[81] = + tmpQ2[113]; tmpQ1[82] = + tmpQ2[114]; tmpQ1[83] = + tmpQ2[115]; tmpQ1[84] = + tmpQ2[116]; tmpQ1[85] = + tmpQ2[117]; tmpQ1[86] = + tmpQ2[118]; tmpQ1[87] = + tmpQ2[119]; tmpQ1[88] = + tmpQ2[120]; tmpQ1[89] = + tmpQ2[121]; tmpQ1[90] = + tmpQ2[126]; tmpQ1[91] = + tmpQ2[127]; tmpQ1[92] = + tmpQ2[128]; tmpQ1[93] = + tmpQ2[129]; tmpQ1[94] = + tmpQ2[130]; tmpQ1[95] = + tmpQ2[131]; tmpQ1[96] = + tmpQ2[132]; tmpQ1[97] = + tmpQ2[133]; tmpQ1[98] = + tmpQ2[134]; tmpQ1[99] = + tmpQ2[135]; } void acado_setObjR1R2( real_t* const tmpObjS, real_t* const tmpR1, real_t* const tmpR2 ) { tmpR2[0] = +tmpObjS[140]; tmpR2[1] = +tmpObjS[141]; tmpR2[2] = +tmpObjS[142]; tmpR2[3] = +tmpObjS[143]; tmpR2[4] = +tmpObjS[144]; tmpR2[5] = +tmpObjS[145]; tmpR2[6] = +tmpObjS[146]; tmpR2[7] = +tmpObjS[147]; tmpR2[8] = +tmpObjS[148]; tmpR2[9] = +tmpObjS[149]; tmpR2[10] = +tmpObjS[150]; tmpR2[11] = +tmpObjS[151]; tmpR2[12] = +tmpObjS[152]; tmpR2[13] = +tmpObjS[153]; 
tmpR2[14] = +tmpObjS[154]; tmpR2[15] = +tmpObjS[155]; tmpR2[16] = +tmpObjS[156]; tmpR2[17] = +tmpObjS[157]; tmpR2[18] = +tmpObjS[158]; tmpR2[19] = +tmpObjS[159]; tmpR2[20] = +tmpObjS[160]; tmpR2[21] = +tmpObjS[161]; tmpR2[22] = +tmpObjS[162]; tmpR2[23] = +tmpObjS[163]; tmpR2[24] = +tmpObjS[164]; tmpR2[25] = +tmpObjS[165]; tmpR2[26] = +tmpObjS[166]; tmpR2[27] = +tmpObjS[167]; tmpR2[28] = +tmpObjS[168]; tmpR2[29] = +tmpObjS[169]; tmpR2[30] = +tmpObjS[170]; tmpR2[31] = +tmpObjS[171]; tmpR2[32] = +tmpObjS[172]; tmpR2[33] = +tmpObjS[173]; tmpR2[34] = +tmpObjS[174]; tmpR2[35] = +tmpObjS[175]; tmpR2[36] = +tmpObjS[176]; tmpR2[37] = +tmpObjS[177]; tmpR2[38] = +tmpObjS[178]; tmpR2[39] = +tmpObjS[179]; tmpR2[40] = +tmpObjS[180]; tmpR2[41] = +tmpObjS[181]; tmpR2[42] = +tmpObjS[182]; tmpR2[43] = +tmpObjS[183]; tmpR2[44] = +tmpObjS[184]; tmpR2[45] = +tmpObjS[185]; tmpR2[46] = +tmpObjS[186]; tmpR2[47] = +tmpObjS[187]; tmpR2[48] = +tmpObjS[188]; tmpR2[49] = +tmpObjS[189]; tmpR2[50] = +tmpObjS[190]; tmpR2[51] = +tmpObjS[191]; tmpR2[52] = +tmpObjS[192]; tmpR2[53] = +tmpObjS[193]; tmpR2[54] = +tmpObjS[194]; tmpR2[55] = +tmpObjS[195]; tmpR1[0] = + tmpR2[10]; tmpR1[1] = + tmpR2[11]; tmpR1[2] = + tmpR2[12]; tmpR1[3] = + tmpR2[13]; tmpR1[4] = + tmpR2[24]; tmpR1[5] = + tmpR2[25]; tmpR1[6] = + tmpR2[26]; tmpR1[7] = + tmpR2[27]; tmpR1[8] = + tmpR2[38]; tmpR1[9] = + tmpR2[39]; tmpR1[10] = + tmpR2[40]; tmpR1[11] = + tmpR2[41]; tmpR1[12] = + tmpR2[52]; tmpR1[13] = + tmpR2[53]; tmpR1[14] = + tmpR2[54]; tmpR1[15] = + tmpR2[55]; } void acado_setObjQN1QN2( real_t* const tmpObjSEndTerm, real_t* const tmpQN1, real_t* const tmpQN2 ) { tmpQN2[0] = +tmpObjSEndTerm[0]; tmpQN2[1] = +tmpObjSEndTerm[1]; tmpQN2[2] = +tmpObjSEndTerm[2]; tmpQN2[3] = +tmpObjSEndTerm[3]; tmpQN2[4] = +tmpObjSEndTerm[4]; tmpQN2[5] = +tmpObjSEndTerm[5]; tmpQN2[6] = +tmpObjSEndTerm[6]; tmpQN2[7] = +tmpObjSEndTerm[7]; tmpQN2[8] = +tmpObjSEndTerm[8]; tmpQN2[9] = +tmpObjSEndTerm[9]; tmpQN2[10] = +tmpObjSEndTerm[10]; tmpQN2[11] = 
+tmpObjSEndTerm[11]; tmpQN2[12] = +tmpObjSEndTerm[12]; tmpQN2[13] = +tmpObjSEndTerm[13]; tmpQN2[14] = +tmpObjSEndTerm[14]; tmpQN2[15] = +tmpObjSEndTerm[15]; tmpQN2[16] = +tmpObjSEndTerm[16]; tmpQN2[17] = +tmpObjSEndTerm[17]; tmpQN2[18] = +tmpObjSEndTerm[18]; tmpQN2[19] = +tmpObjSEndTerm[19]; tmpQN2[20] = +tmpObjSEndTerm[20]; tmpQN2[21] = +tmpObjSEndTerm[21]; tmpQN2[22] = +tmpObjSEndTerm[22]; tmpQN2[23] = +tmpObjSEndTerm[23]; tmpQN2[24] = +tmpObjSEndTerm[24]; tmpQN2[25] = +tmpObjSEndTerm[25]; tmpQN2[26] = +tmpObjSEndTerm[26]; tmpQN2[27] = +tmpObjSEndTerm[27]; tmpQN2[28] = +tmpObjSEndTerm[28]; tmpQN2[29] = +tmpObjSEndTerm[29]; tmpQN2[30] = +tmpObjSEndTerm[30]; tmpQN2[31] = +tmpObjSEndTerm[31]; tmpQN2[32] = +tmpObjSEndTerm[32]; tmpQN2[33] = +tmpObjSEndTerm[33]; tmpQN2[34] = +tmpObjSEndTerm[34]; tmpQN2[35] = +tmpObjSEndTerm[35]; tmpQN2[36] = +tmpObjSEndTerm[36]; tmpQN2[37] = +tmpObjSEndTerm[37]; tmpQN2[38] = +tmpObjSEndTerm[38]; tmpQN2[39] = +tmpObjSEndTerm[39]; tmpQN2[40] = +tmpObjSEndTerm[40]; tmpQN2[41] = +tmpObjSEndTerm[41]; tmpQN2[42] = +tmpObjSEndTerm[42]; tmpQN2[43] = +tmpObjSEndTerm[43]; tmpQN2[44] = +tmpObjSEndTerm[44]; tmpQN2[45] = +tmpObjSEndTerm[45]; tmpQN2[46] = +tmpObjSEndTerm[46]; tmpQN2[47] = +tmpObjSEndTerm[47]; tmpQN2[48] = +tmpObjSEndTerm[48]; tmpQN2[49] = +tmpObjSEndTerm[49]; tmpQN2[50] = +tmpObjSEndTerm[50]; tmpQN2[51] = +tmpObjSEndTerm[51]; tmpQN2[52] = +tmpObjSEndTerm[52]; tmpQN2[53] = +tmpObjSEndTerm[53]; tmpQN2[54] = +tmpObjSEndTerm[54]; tmpQN2[55] = +tmpObjSEndTerm[55]; tmpQN2[56] = +tmpObjSEndTerm[56]; tmpQN2[57] = +tmpObjSEndTerm[57]; tmpQN2[58] = +tmpObjSEndTerm[58]; tmpQN2[59] = +tmpObjSEndTerm[59]; tmpQN2[60] = +tmpObjSEndTerm[60]; tmpQN2[61] = +tmpObjSEndTerm[61]; tmpQN2[62] = +tmpObjSEndTerm[62]; tmpQN2[63] = +tmpObjSEndTerm[63]; tmpQN2[64] = +tmpObjSEndTerm[64]; tmpQN2[65] = +tmpObjSEndTerm[65]; tmpQN2[66] = +tmpObjSEndTerm[66]; tmpQN2[67] = +tmpObjSEndTerm[67]; tmpQN2[68] = +tmpObjSEndTerm[68]; tmpQN2[69] = +tmpObjSEndTerm[69]; 
tmpQN2[70] = +tmpObjSEndTerm[70]; tmpQN2[71] = +tmpObjSEndTerm[71]; tmpQN2[72] = +tmpObjSEndTerm[72]; tmpQN2[73] = +tmpObjSEndTerm[73]; tmpQN2[74] = +tmpObjSEndTerm[74]; tmpQN2[75] = +tmpObjSEndTerm[75]; tmpQN2[76] = +tmpObjSEndTerm[76]; tmpQN2[77] = +tmpObjSEndTerm[77]; tmpQN2[78] = +tmpObjSEndTerm[78]; tmpQN2[79] = +tmpObjSEndTerm[79]; tmpQN2[80] = +tmpObjSEndTerm[80]; tmpQN2[81] = +tmpObjSEndTerm[81]; tmpQN2[82] = +tmpObjSEndTerm[82]; tmpQN2[83] = +tmpObjSEndTerm[83]; tmpQN2[84] = +tmpObjSEndTerm[84]; tmpQN2[85] = +tmpObjSEndTerm[85]; tmpQN2[86] = +tmpObjSEndTerm[86]; tmpQN2[87] = +tmpObjSEndTerm[87]; tmpQN2[88] = +tmpObjSEndTerm[88]; tmpQN2[89] = +tmpObjSEndTerm[89]; tmpQN2[90] = +tmpObjSEndTerm[90]; tmpQN2[91] = +tmpObjSEndTerm[91]; tmpQN2[92] = +tmpObjSEndTerm[92]; tmpQN2[93] = +tmpObjSEndTerm[93]; tmpQN2[94] = +tmpObjSEndTerm[94]; tmpQN2[95] = +tmpObjSEndTerm[95]; tmpQN2[96] = +tmpObjSEndTerm[96]; tmpQN2[97] = +tmpObjSEndTerm[97]; tmpQN2[98] = +tmpObjSEndTerm[98]; tmpQN2[99] = +tmpObjSEndTerm[99]; tmpQN1[0] = + tmpQN2[0]; tmpQN1[1] = + tmpQN2[1]; tmpQN1[2] = + tmpQN2[2]; tmpQN1[3] = + tmpQN2[3]; tmpQN1[4] = + tmpQN2[4]; tmpQN1[5] = + tmpQN2[5]; tmpQN1[6] = + tmpQN2[6]; tmpQN1[7] = + tmpQN2[7]; tmpQN1[8] = + tmpQN2[8]; tmpQN1[9] = + tmpQN2[9]; tmpQN1[10] = + tmpQN2[10]; tmpQN1[11] = + tmpQN2[11]; tmpQN1[12] = + tmpQN2[12]; tmpQN1[13] = + tmpQN2[13]; tmpQN1[14] = + tmpQN2[14]; tmpQN1[15] = + tmpQN2[15]; tmpQN1[16] = + tmpQN2[16]; tmpQN1[17] = + tmpQN2[17]; tmpQN1[18] = + tmpQN2[18]; tmpQN1[19] = + tmpQN2[19]; tmpQN1[20] = + tmpQN2[20]; tmpQN1[21] = + tmpQN2[21]; tmpQN1[22] = + tmpQN2[22]; tmpQN1[23] = + tmpQN2[23]; tmpQN1[24] = + tmpQN2[24]; tmpQN1[25] = + tmpQN2[25]; tmpQN1[26] = + tmpQN2[26]; tmpQN1[27] = + tmpQN2[27]; tmpQN1[28] = + tmpQN2[28]; tmpQN1[29] = + tmpQN2[29]; tmpQN1[30] = + tmpQN2[30]; tmpQN1[31] = + tmpQN2[31]; tmpQN1[32] = + tmpQN2[32]; tmpQN1[33] = + tmpQN2[33]; tmpQN1[34] = + tmpQN2[34]; tmpQN1[35] = + tmpQN2[35]; tmpQN1[36] = + tmpQN2[36]; 
tmpQN1[37] = + tmpQN2[37]; tmpQN1[38] = + tmpQN2[38]; tmpQN1[39] = + tmpQN2[39]; tmpQN1[40] = + tmpQN2[40]; tmpQN1[41] = + tmpQN2[41]; tmpQN1[42] = + tmpQN2[42]; tmpQN1[43] = + tmpQN2[43]; tmpQN1[44] = + tmpQN2[44]; tmpQN1[45] = + tmpQN2[45]; tmpQN1[46] = + tmpQN2[46]; tmpQN1[47] = + tmpQN2[47]; tmpQN1[48] = + tmpQN2[48]; tmpQN1[49] = + tmpQN2[49]; tmpQN1[50] = + tmpQN2[50]; tmpQN1[51] = + tmpQN2[51]; tmpQN1[52] = + tmpQN2[52]; tmpQN1[53] = + tmpQN2[53]; tmpQN1[54] = + tmpQN2[54]; tmpQN1[55] = + tmpQN2[55]; tmpQN1[56] = + tmpQN2[56]; tmpQN1[57] = + tmpQN2[57]; tmpQN1[58] = + tmpQN2[58]; tmpQN1[59] = + tmpQN2[59]; tmpQN1[60] = + tmpQN2[60]; tmpQN1[61] = + tmpQN2[61]; tmpQN1[62] = + tmpQN2[62]; tmpQN1[63] = + tmpQN2[63]; tmpQN1[64] = + tmpQN2[64]; tmpQN1[65] = + tmpQN2[65]; tmpQN1[66] = + tmpQN2[66]; tmpQN1[67] = + tmpQN2[67]; tmpQN1[68] = + tmpQN2[68]; tmpQN1[69] = + tmpQN2[69]; tmpQN1[70] = + tmpQN2[70]; tmpQN1[71] = + tmpQN2[71]; tmpQN1[72] = + tmpQN2[72]; tmpQN1[73] = + tmpQN2[73]; tmpQN1[74] = + tmpQN2[74]; tmpQN1[75] = + tmpQN2[75]; tmpQN1[76] = + tmpQN2[76]; tmpQN1[77] = + tmpQN2[77]; tmpQN1[78] = + tmpQN2[78]; tmpQN1[79] = + tmpQN2[79]; tmpQN1[80] = + tmpQN2[80]; tmpQN1[81] = + tmpQN2[81]; tmpQN1[82] = + tmpQN2[82]; tmpQN1[83] = + tmpQN2[83]; tmpQN1[84] = + tmpQN2[84]; tmpQN1[85] = + tmpQN2[85]; tmpQN1[86] = + tmpQN2[86]; tmpQN1[87] = + tmpQN2[87]; tmpQN1[88] = + tmpQN2[88]; tmpQN1[89] = + tmpQN2[89]; tmpQN1[90] = + tmpQN2[90]; tmpQN1[91] = + tmpQN2[91]; tmpQN1[92] = + tmpQN2[92]; tmpQN1[93] = + tmpQN2[93]; tmpQN1[94] = + tmpQN2[94]; tmpQN1[95] = + tmpQN2[95]; tmpQN1[96] = + tmpQN2[96]; tmpQN1[97] = + tmpQN2[97]; tmpQN1[98] = + tmpQN2[98]; tmpQN1[99] = + tmpQN2[99]; } void acado_evaluateObjective( ) { int runObj; for (runObj = 0; runObj < 20; ++runObj) { acadoWorkspace.objValueIn[0] = acadoVariables.x[runObj * 10]; acadoWorkspace.objValueIn[1] = acadoVariables.x[runObj * 10 + 1]; acadoWorkspace.objValueIn[2] = acadoVariables.x[runObj * 10 + 2]; 
acadoWorkspace.objValueIn[3] = acadoVariables.x[runObj * 10 + 3]; acadoWorkspace.objValueIn[4] = acadoVariables.x[runObj * 10 + 4]; acadoWorkspace.objValueIn[5] = acadoVariables.x[runObj * 10 + 5]; acadoWorkspace.objValueIn[6] = acadoVariables.x[runObj * 10 + 6]; acadoWorkspace.objValueIn[7] = acadoVariables.x[runObj * 10 + 7]; acadoWorkspace.objValueIn[8] = acadoVariables.x[runObj * 10 + 8]; acadoWorkspace.objValueIn[9] = acadoVariables.x[runObj * 10 + 9]; acadoWorkspace.objValueIn[10] = acadoVariables.u[runObj * 4]; acadoWorkspace.objValueIn[11] = acadoVariables.u[runObj * 4 + 1]; acadoWorkspace.objValueIn[12] = acadoVariables.u[runObj * 4 + 2]; acadoWorkspace.objValueIn[13] = acadoVariables.u[runObj * 4 + 3]; acadoWorkspace.objValueIn[14] = acadoVariables.od[runObj * 10]; acadoWorkspace.objValueIn[15] = acadoVariables.od[runObj * 10 + 1]; acadoWorkspace.objValueIn[16] = acadoVariables.od[runObj * 10 + 2]; acadoWorkspace.objValueIn[17] = acadoVariables.od[runObj * 10 + 3]; acadoWorkspace.objValueIn[18] = acadoVariables.od[runObj * 10 + 4]; acadoWorkspace.objValueIn[19] = acadoVariables.od[runObj * 10 + 5]; acadoWorkspace.objValueIn[20] = acadoVariables.od[runObj * 10 + 6]; acadoWorkspace.objValueIn[21] = acadoVariables.od[runObj * 10 + 7]; acadoWorkspace.objValueIn[22] = acadoVariables.od[runObj * 10 + 8]; acadoWorkspace.objValueIn[23] = acadoVariables.od[runObj * 10 + 9]; acado_evaluateLSQ( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut ); acadoWorkspace.Dy[runObj * 14] = acadoWorkspace.objValueOut[0]; acadoWorkspace.Dy[runObj * 14 + 1] = acadoWorkspace.objValueOut[1]; acadoWorkspace.Dy[runObj * 14 + 2] = acadoWorkspace.objValueOut[2]; acadoWorkspace.Dy[runObj * 14 + 3] = acadoWorkspace.objValueOut[3]; acadoWorkspace.Dy[runObj * 14 + 4] = acadoWorkspace.objValueOut[4]; acadoWorkspace.Dy[runObj * 14 + 5] = acadoWorkspace.objValueOut[5]; acadoWorkspace.Dy[runObj * 14 + 6] = acadoWorkspace.objValueOut[6]; acadoWorkspace.Dy[runObj * 14 + 7] = 
acadoWorkspace.objValueOut[7]; acadoWorkspace.Dy[runObj * 14 + 8] = acadoWorkspace.objValueOut[8]; acadoWorkspace.Dy[runObj * 14 + 9] = acadoWorkspace.objValueOut[9]; acadoWorkspace.Dy[runObj * 14 + 10] = acadoWorkspace.objValueOut[10]; acadoWorkspace.Dy[runObj * 14 + 11] = acadoWorkspace.objValueOut[11]; acadoWorkspace.Dy[runObj * 14 + 12] = acadoWorkspace.objValueOut[12]; acadoWorkspace.Dy[runObj * 14 + 13] = acadoWorkspace.objValueOut[13]; acado_setObjQ1Q2( &(acadoVariables.W[ runObj * 196 ]), &(acadoWorkspace.Q1[ runObj * 100 ]), &(acadoWorkspace.Q2[ runObj * 140 ]) ); acado_setObjR1R2( &(acadoVariables.W[ runObj * 196 ]), &(acadoWorkspace.R1[ runObj * 16 ]), &(acadoWorkspace.R2[ runObj * 56 ]) ); } acadoWorkspace.objValueIn[0] = acadoVariables.x[200]; acadoWorkspace.objValueIn[1] = acadoVariables.x[201]; acadoWorkspace.objValueIn[2] = acadoVariables.x[202]; acadoWorkspace.objValueIn[3] = acadoVariables.x[203]; acadoWorkspace.objValueIn[4] = acadoVariables.x[204]; acadoWorkspace.objValueIn[5] = acadoVariables.x[205]; acadoWorkspace.objValueIn[6] = acadoVariables.x[206]; acadoWorkspace.objValueIn[7] = acadoVariables.x[207]; acadoWorkspace.objValueIn[8] = acadoVariables.x[208]; acadoWorkspace.objValueIn[9] = acadoVariables.x[209]; acadoWorkspace.objValueIn[10] = acadoVariables.od[200]; acadoWorkspace.objValueIn[11] = acadoVariables.od[201]; acadoWorkspace.objValueIn[12] = acadoVariables.od[202]; acadoWorkspace.objValueIn[13] = acadoVariables.od[203]; acadoWorkspace.objValueIn[14] = acadoVariables.od[204]; acadoWorkspace.objValueIn[15] = acadoVariables.od[205]; acadoWorkspace.objValueIn[16] = acadoVariables.od[206]; acadoWorkspace.objValueIn[17] = acadoVariables.od[207]; acadoWorkspace.objValueIn[18] = acadoVariables.od[208]; acadoWorkspace.objValueIn[19] = acadoVariables.od[209]; acado_evaluateLSQEndTerm( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut ); acadoWorkspace.DyN[0] = acadoWorkspace.objValueOut[0]; acadoWorkspace.DyN[1] = 
acadoWorkspace.objValueOut[1]; acadoWorkspace.DyN[2] = acadoWorkspace.objValueOut[2]; acadoWorkspace.DyN[3] = acadoWorkspace.objValueOut[3]; acadoWorkspace.DyN[4] = acadoWorkspace.objValueOut[4]; acadoWorkspace.DyN[5] = acadoWorkspace.objValueOut[5]; acadoWorkspace.DyN[6] = acadoWorkspace.objValueOut[6]; acadoWorkspace.DyN[7] = acadoWorkspace.objValueOut[7]; acadoWorkspace.DyN[8] = acadoWorkspace.objValueOut[8]; acadoWorkspace.DyN[9] = acadoWorkspace.objValueOut[9]; acado_setObjQN1QN2( acadoVariables.WN, acadoWorkspace.QN1, acadoWorkspace.QN2 ); } void acado_moveGxT( real_t* const Gx1, real_t* const Gx2 ) { Gx2[0] = Gx1[0]; Gx2[1] = Gx1[1]; Gx2[2] = Gx1[2]; Gx2[3] = Gx1[3]; Gx2[4] = Gx1[4]; Gx2[5] = Gx1[5]; Gx2[6] = Gx1[6]; Gx2[7] = Gx1[7]; Gx2[8] = Gx1[8]; Gx2[9] = Gx1[9]; Gx2[10] = Gx1[10]; Gx2[11] = Gx1[11]; Gx2[12] = Gx1[12]; Gx2[13] = Gx1[13]; Gx2[14] = Gx1[14]; Gx2[15] = Gx1[15]; Gx2[16] = Gx1[16]; Gx2[17] = Gx1[17]; Gx2[18] = Gx1[18]; Gx2[19] = Gx1[19]; Gx2[20] = Gx1[20]; Gx2[21] = Gx1[21]; Gx2[22] = Gx1[22]; Gx2[23] = Gx1[23]; Gx2[24] = Gx1[24]; Gx2[25] = Gx1[25]; Gx2[26] = Gx1[26]; Gx2[27] = Gx1[27]; Gx2[28] = Gx1[28]; Gx2[29] = Gx1[29]; Gx2[30] = Gx1[30]; Gx2[31] = Gx1[31]; Gx2[32] = Gx1[32]; Gx2[33] = Gx1[33]; Gx2[34] = Gx1[34]; Gx2[35] = Gx1[35]; Gx2[36] = Gx1[36]; Gx2[37] = Gx1[37]; Gx2[38] = Gx1[38]; Gx2[39] = Gx1[39]; Gx2[40] = Gx1[40]; Gx2[41] = Gx1[41]; Gx2[42] = Gx1[42]; Gx2[43] = Gx1[43]; Gx2[44] = Gx1[44]; Gx2[45] = Gx1[45]; Gx2[46] = Gx1[46]; Gx2[47] = Gx1[47]; Gx2[48] = Gx1[48]; Gx2[49] = Gx1[49]; Gx2[50] = Gx1[50]; Gx2[51] = Gx1[51]; Gx2[52] = Gx1[52]; Gx2[53] = Gx1[53]; Gx2[54] = Gx1[54]; Gx2[55] = Gx1[55]; Gx2[56] = Gx1[56]; Gx2[57] = Gx1[57]; Gx2[58] = Gx1[58]; Gx2[59] = Gx1[59]; Gx2[60] = Gx1[60]; Gx2[61] = Gx1[61]; Gx2[62] = Gx1[62]; Gx2[63] = Gx1[63]; Gx2[64] = Gx1[64]; Gx2[65] = Gx1[65]; Gx2[66] = Gx1[66]; Gx2[67] = Gx1[67]; Gx2[68] = Gx1[68]; Gx2[69] = Gx1[69]; Gx2[70] = Gx1[70]; Gx2[71] = Gx1[71]; Gx2[72] = Gx1[72]; Gx2[73] = 
Gx1[73]; Gx2[74] = Gx1[74]; Gx2[75] = Gx1[75]; Gx2[76] = Gx1[76]; Gx2[77] = Gx1[77]; Gx2[78] = Gx1[78]; Gx2[79] = Gx1[79]; Gx2[80] = Gx1[80]; Gx2[81] = Gx1[81]; Gx2[82] = Gx1[82]; Gx2[83] = Gx1[83]; Gx2[84] = Gx1[84]; Gx2[85] = Gx1[85]; Gx2[86] = Gx1[86]; Gx2[87] = Gx1[87]; Gx2[88] = Gx1[88]; Gx2[89] = Gx1[89]; Gx2[90] = Gx1[90]; Gx2[91] = Gx1[91]; Gx2[92] = Gx1[92]; Gx2[93] = Gx1[93]; Gx2[94] = Gx1[94]; Gx2[95] = Gx1[95]; Gx2[96] = Gx1[96]; Gx2[97] = Gx1[97]; Gx2[98] = Gx1[98]; Gx2[99] = Gx1[99]; } void acado_multGxGx( real_t* const Gx1, real_t* const Gx2, real_t* const Gx3 ) { Gx3[0] = + Gx1[0]*Gx2[0] + Gx1[1]*Gx2[10] + Gx1[2]*Gx2[20] + Gx1[3]*Gx2[30] + Gx1[4]*Gx2[40] + Gx1[5]*Gx2[50] + Gx1[6]*Gx2[60] + Gx1[7]*Gx2[70] + Gx1[8]*Gx2[80] + Gx1[9]*Gx2[90]; Gx3[1] = + Gx1[0]*Gx2[1] + Gx1[1]*Gx2[11] + Gx1[2]*Gx2[21] + Gx1[3]*Gx2[31] + Gx1[4]*Gx2[41] + Gx1[5]*Gx2[51] + Gx1[6]*Gx2[61] + Gx1[7]*Gx2[71] + Gx1[8]*Gx2[81] + Gx1[9]*Gx2[91]; Gx3[2] = + Gx1[0]*Gx2[2] + Gx1[1]*Gx2[12] + Gx1[2]*Gx2[22] + Gx1[3]*Gx2[32] + Gx1[4]*Gx2[42] + Gx1[5]*Gx2[52] + Gx1[6]*Gx2[62] + Gx1[7]*Gx2[72] + Gx1[8]*Gx2[82] + Gx1[9]*Gx2[92]; Gx3[3] = + Gx1[0]*Gx2[3] + Gx1[1]*Gx2[13] + Gx1[2]*Gx2[23] + Gx1[3]*Gx2[33] + Gx1[4]*Gx2[43] + Gx1[5]*Gx2[53] + Gx1[6]*Gx2[63] + Gx1[7]*Gx2[73] + Gx1[8]*Gx2[83] + Gx1[9]*Gx2[93]; Gx3[4] = + Gx1[0]*Gx2[4] + Gx1[1]*Gx2[14] + Gx1[2]*Gx2[24] + Gx1[3]*Gx2[34] + Gx1[4]*Gx2[44] + Gx1[5]*Gx2[54] + Gx1[6]*Gx2[64] + Gx1[7]*Gx2[74] + Gx1[8]*Gx2[84] + Gx1[9]*Gx2[94]; Gx3[5] = + Gx1[0]*Gx2[5] + Gx1[1]*Gx2[15] + Gx1[2]*Gx2[25] + Gx1[3]*Gx2[35] + Gx1[4]*Gx2[45] + Gx1[5]*Gx2[55] + Gx1[6]*Gx2[65] + Gx1[7]*Gx2[75] + Gx1[8]*Gx2[85] + Gx1[9]*Gx2[95]; Gx3[6] = + Gx1[0]*Gx2[6] + Gx1[1]*Gx2[16] + Gx1[2]*Gx2[26] + Gx1[3]*Gx2[36] + Gx1[4]*Gx2[46] + Gx1[5]*Gx2[56] + Gx1[6]*Gx2[66] + Gx1[7]*Gx2[76] + Gx1[8]*Gx2[86] + Gx1[9]*Gx2[96]; Gx3[7] = + Gx1[0]*Gx2[7] + Gx1[1]*Gx2[17] + Gx1[2]*Gx2[27] + Gx1[3]*Gx2[37] + Gx1[4]*Gx2[47] + Gx1[5]*Gx2[57] + Gx1[6]*Gx2[67] + Gx1[7]*Gx2[77] + Gx1[8]*Gx2[87] 
+ Gx1[9]*Gx2[97]; Gx3[8] = + Gx1[0]*Gx2[8] + Gx1[1]*Gx2[18] + Gx1[2]*Gx2[28] + Gx1[3]*Gx2[38] + Gx1[4]*Gx2[48] + Gx1[5]*Gx2[58] + Gx1[6]*Gx2[68] + Gx1[7]*Gx2[78] + Gx1[8]*Gx2[88] + Gx1[9]*Gx2[98]; Gx3[9] = + Gx1[0]*Gx2[9] + Gx1[1]*Gx2[19] + Gx1[2]*Gx2[29] + Gx1[3]*Gx2[39] + Gx1[4]*Gx2[49] + Gx1[5]*Gx2[59] + Gx1[6]*Gx2[69] + Gx1[7]*Gx2[79] + Gx1[8]*Gx2[89] + Gx1[9]*Gx2[99]; Gx3[10] = + Gx1[10]*Gx2[0] + Gx1[11]*Gx2[10] + Gx1[12]*Gx2[20] + Gx1[13]*Gx2[30] + Gx1[14]*Gx2[40] + Gx1[15]*Gx2[50] + Gx1[16]*Gx2[60] + Gx1[17]*Gx2[70] + Gx1[18]*Gx2[80] + Gx1[19]*Gx2[90]; Gx3[11] = + Gx1[10]*Gx2[1] + Gx1[11]*Gx2[11] + Gx1[12]*Gx2[21] + Gx1[13]*Gx2[31] + Gx1[14]*Gx2[41] + Gx1[15]*Gx2[51] + Gx1[16]*Gx2[61] + Gx1[17]*Gx2[71] + Gx1[18]*Gx2[81] + Gx1[19]*Gx2[91]; Gx3[12] = + Gx1[10]*Gx2[2] + Gx1[11]*Gx2[12] + Gx1[12]*Gx2[22] + Gx1[13]*Gx2[32] + Gx1[14]*Gx2[42] + Gx1[15]*Gx2[52] + Gx1[16]*Gx2[62] + Gx1[17]*Gx2[72] + Gx1[18]*Gx2[82] + Gx1[19]*Gx2[92]; Gx3[13] = + Gx1[10]*Gx2[3] + Gx1[11]*Gx2[13] + Gx1[12]*Gx2[23] + Gx1[13]*Gx2[33] + Gx1[14]*Gx2[43] + Gx1[15]*Gx2[53] + Gx1[16]*Gx2[63] + Gx1[17]*Gx2[73] + Gx1[18]*Gx2[83] + Gx1[19]*Gx2[93]; Gx3[14] = + Gx1[10]*Gx2[4] + Gx1[11]*Gx2[14] + Gx1[12]*Gx2[24] + Gx1[13]*Gx2[34] + Gx1[14]*Gx2[44] + Gx1[15]*Gx2[54] + Gx1[16]*Gx2[64] + Gx1[17]*Gx2[74] + Gx1[18]*Gx2[84] + Gx1[19]*Gx2[94]; Gx3[15] = + Gx1[10]*Gx2[5] + Gx1[11]*Gx2[15] + Gx1[12]*Gx2[25] + Gx1[13]*Gx2[35] + Gx1[14]*Gx2[45] + Gx1[15]*Gx2[55] + Gx1[16]*Gx2[65] + Gx1[17]*Gx2[75] + Gx1[18]*Gx2[85] + Gx1[19]*Gx2[95]; Gx3[16] = + Gx1[10]*Gx2[6] + Gx1[11]*Gx2[16] + Gx1[12]*Gx2[26] + Gx1[13]*Gx2[36] + Gx1[14]*Gx2[46] + Gx1[15]*Gx2[56] + Gx1[16]*Gx2[66] + Gx1[17]*Gx2[76] + Gx1[18]*Gx2[86] + Gx1[19]*Gx2[96]; Gx3[17] = + Gx1[10]*Gx2[7] + Gx1[11]*Gx2[17] + Gx1[12]*Gx2[27] + Gx1[13]*Gx2[37] + Gx1[14]*Gx2[47] + Gx1[15]*Gx2[57] + Gx1[16]*Gx2[67] + Gx1[17]*Gx2[77] + Gx1[18]*Gx2[87] + Gx1[19]*Gx2[97]; Gx3[18] = + Gx1[10]*Gx2[8] + Gx1[11]*Gx2[18] + Gx1[12]*Gx2[28] + Gx1[13]*Gx2[38] + Gx1[14]*Gx2[48] + 
Gx1[15]*Gx2[58] + Gx1[16]*Gx2[68] + Gx1[17]*Gx2[78] + Gx1[18]*Gx2[88] + Gx1[19]*Gx2[98]; Gx3[19] = + Gx1[10]*Gx2[9] + Gx1[11]*Gx2[19] + Gx1[12]*Gx2[29] + Gx1[13]*Gx2[39] + Gx1[14]*Gx2[49] + Gx1[15]*Gx2[59] + Gx1[16]*Gx2[69] + Gx1[17]*Gx2[79] + Gx1[18]*Gx2[89] + Gx1[19]*Gx2[99]; Gx3[20] = + Gx1[20]*Gx2[0] + Gx1[21]*Gx2[10] + Gx1[22]*Gx2[20] + Gx1[23]*Gx2[30] + Gx1[24]*Gx2[40] + Gx1[25]*Gx2[50] + Gx1[26]*Gx2[60] + Gx1[27]*Gx2[70] + Gx1[28]*Gx2[80] + Gx1[29]*Gx2[90]; Gx3[21] = + Gx1[20]*Gx2[1] + Gx1[21]*Gx2[11] + Gx1[22]*Gx2[21] + Gx1[23]*Gx2[31] + Gx1[24]*Gx2[41] + Gx1[25]*Gx2[51] + Gx1[26]*Gx2[61] + Gx1[27]*Gx2[71] + Gx1[28]*Gx2[81] + Gx1[29]*Gx2[91]; Gx3[22] = + Gx1[20]*Gx2[2] + Gx1[21]*Gx2[12] + Gx1[22]*Gx2[22] + Gx1[23]*Gx2[32] + Gx1[24]*Gx2[42] + Gx1[25]*Gx2[52] + Gx1[26]*Gx2[62] + Gx1[27]*Gx2[72] + Gx1[28]*Gx2[82] + Gx1[29]*Gx2[92]; Gx3[23] = + Gx1[20]*Gx2[3] + Gx1[21]*Gx2[13] + Gx1[22]*Gx2[23] + Gx1[23]*Gx2[33] + Gx1[24]*Gx2[43] + Gx1[25]*Gx2[53] + Gx1[26]*Gx2[63] + Gx1[27]*Gx2[73] + Gx1[28]*Gx2[83] + Gx1[29]*Gx2[93]; Gx3[24] = + Gx1[20]*Gx2[4] + Gx1[21]*Gx2[14] + Gx1[22]*Gx2[24] + Gx1[23]*Gx2[34] + Gx1[24]*Gx2[44] + Gx1[25]*Gx2[54] + Gx1[26]*Gx2[64] + Gx1[27]*Gx2[74] + Gx1[28]*Gx2[84] + Gx1[29]*Gx2[94]; Gx3[25] = + Gx1[20]*Gx2[5] + Gx1[21]*Gx2[15] + Gx1[22]*Gx2[25] + Gx1[23]*Gx2[35] + Gx1[24]*Gx2[45] + Gx1[25]*Gx2[55] + Gx1[26]*Gx2[65] + Gx1[27]*Gx2[75] + Gx1[28]*Gx2[85] + Gx1[29]*Gx2[95]; Gx3[26] = + Gx1[20]*Gx2[6] + Gx1[21]*Gx2[16] + Gx1[22]*Gx2[26] + Gx1[23]*Gx2[36] + Gx1[24]*Gx2[46] + Gx1[25]*Gx2[56] + Gx1[26]*Gx2[66] + Gx1[27]*Gx2[76] + Gx1[28]*Gx2[86] + Gx1[29]*Gx2[96]; Gx3[27] = + Gx1[20]*Gx2[7] + Gx1[21]*Gx2[17] + Gx1[22]*Gx2[27] + Gx1[23]*Gx2[37] + Gx1[24]*Gx2[47] + Gx1[25]*Gx2[57] + Gx1[26]*Gx2[67] + Gx1[27]*Gx2[77] + Gx1[28]*Gx2[87] + Gx1[29]*Gx2[97]; Gx3[28] = + Gx1[20]*Gx2[8] + Gx1[21]*Gx2[18] + Gx1[22]*Gx2[28] + Gx1[23]*Gx2[38] + Gx1[24]*Gx2[48] + Gx1[25]*Gx2[58] + Gx1[26]*Gx2[68] + Gx1[27]*Gx2[78] + Gx1[28]*Gx2[88] + Gx1[29]*Gx2[98]; Gx3[29] = 
+ Gx1[20]*Gx2[9] + Gx1[21]*Gx2[19] + Gx1[22]*Gx2[29] + Gx1[23]*Gx2[39] + Gx1[24]*Gx2[49] + Gx1[25]*Gx2[59] + Gx1[26]*Gx2[69] + Gx1[27]*Gx2[79] + Gx1[28]*Gx2[89] + Gx1[29]*Gx2[99]; Gx3[30] = + Gx1[30]*Gx2[0] + Gx1[31]*Gx2[10] + Gx1[32]*Gx2[20] + Gx1[33]*Gx2[30] + Gx1[34]*Gx2[40] + Gx1[35]*Gx2[50] + Gx1[36]*Gx2[60] + Gx1[37]*Gx2[70] + Gx1[38]*Gx2[80] + Gx1[39]*Gx2[90]; Gx3[31] = + Gx1[30]*Gx2[1] + Gx1[31]*Gx2[11] + Gx1[32]*Gx2[21] + Gx1[33]*Gx2[31] + Gx1[34]*Gx2[41] + Gx1[35]*Gx2[51] + Gx1[36]*Gx2[61] + Gx1[37]*Gx2[71] + Gx1[38]*Gx2[81] + Gx1[39]*Gx2[91]; Gx3[32] = + Gx1[30]*Gx2[2] + Gx1[31]*Gx2[12] + Gx1[32]*Gx2[22] + Gx1[33]*Gx2[32] + Gx1[34]*Gx2[42] + Gx1[35]*Gx2[52] + Gx1[36]*Gx2[62] + Gx1[37]*Gx2[72] + Gx1[38]*Gx2[82] + Gx1[39]*Gx2[92]; Gx3[33] = + Gx1[30]*Gx2[3] + Gx1[31]*Gx2[13] + Gx1[32]*Gx2[23] + Gx1[33]*Gx2[33] + Gx1[34]*Gx2[43] + Gx1[35]*Gx2[53] + Gx1[36]*Gx2[63] + Gx1[37]*Gx2[73] + Gx1[38]*Gx2[83] + Gx1[39]*Gx2[93]; Gx3[34] = + Gx1[30]*Gx2[4] + Gx1[31]*Gx2[14] + Gx1[32]*Gx2[24] + Gx1[33]*Gx2[34] + Gx1[34]*Gx2[44] + Gx1[35]*Gx2[54] + Gx1[36]*Gx2[64] + Gx1[37]*Gx2[74] + Gx1[38]*Gx2[84] + Gx1[39]*Gx2[94]; Gx3[35] = + Gx1[30]*Gx2[5] + Gx1[31]*Gx2[15] + Gx1[32]*Gx2[25] + Gx1[33]*Gx2[35] + Gx1[34]*Gx2[45] + Gx1[35]*Gx2[55] + Gx1[36]*Gx2[65] + Gx1[37]*Gx2[75] + Gx1[38]*Gx2[85] + Gx1[39]*Gx2[95]; Gx3[36] = + Gx1[30]*Gx2[6] + Gx1[31]*Gx2[16] + Gx1[32]*Gx2[26] + Gx1[33]*Gx2[36] + Gx1[34]*Gx2[46] + Gx1[35]*Gx2[56] + Gx1[36]*Gx2[66] + Gx1[37]*Gx2[76] + Gx1[38]*Gx2[86] + Gx1[39]*Gx2[96]; Gx3[37] = + Gx1[30]*Gx2[7] + Gx1[31]*Gx2[17] + Gx1[32]*Gx2[27] + Gx1[33]*Gx2[37] + Gx1[34]*Gx2[47] + Gx1[35]*Gx2[57] + Gx1[36]*Gx2[67] + Gx1[37]*Gx2[77] + Gx1[38]*Gx2[87] + Gx1[39]*Gx2[97]; Gx3[38] = + Gx1[30]*Gx2[8] + Gx1[31]*Gx2[18] + Gx1[32]*Gx2[28] + Gx1[33]*Gx2[38] + Gx1[34]*Gx2[48] + Gx1[35]*Gx2[58] + Gx1[36]*Gx2[68] + Gx1[37]*Gx2[78] + Gx1[38]*Gx2[88] + Gx1[39]*Gx2[98]; Gx3[39] = + Gx1[30]*Gx2[9] + Gx1[31]*Gx2[19] + Gx1[32]*Gx2[29] + Gx1[33]*Gx2[39] + Gx1[34]*Gx2[49] + 
Gx1[35]*Gx2[59] + Gx1[36]*Gx2[69] + Gx1[37]*Gx2[79] + Gx1[38]*Gx2[89] + Gx1[39]*Gx2[99]; Gx3[40] = + Gx1[40]*Gx2[0] + Gx1[41]*Gx2[10] + Gx1[42]*Gx2[20] + Gx1[43]*Gx2[30] + Gx1[44]*Gx2[40] + Gx1[45]*Gx2[50] + Gx1[46]*Gx2[60] + Gx1[47]*Gx2[70] + Gx1[48]*Gx2[80] + Gx1[49]*Gx2[90]; Gx3[41] = + Gx1[40]*Gx2[1] + Gx1[41]*Gx2[11] + Gx1[42]*Gx2[21] + Gx1[43]*Gx2[31] + Gx1[44]*Gx2[41] + Gx1[45]*Gx2[51] + Gx1[46]*Gx2[61] + Gx1[47]*Gx2[71] + Gx1[48]*Gx2[81] + Gx1[49]*Gx2[91]; Gx3[42] = + Gx1[40]*Gx2[2] + Gx1[41]*Gx2[12] + Gx1[42]*Gx2[22] + Gx1[43]*Gx2[32] + Gx1[44]*Gx2[42] + Gx1[45]*Gx2[52] + Gx1[46]*Gx2[62] + Gx1[47]*Gx2[72] + Gx1[48]*Gx2[82] + Gx1[49]*Gx2[92]; Gx3[43] = + Gx1[40]*Gx2[3] + Gx1[41]*Gx2[13] + Gx1[42]*Gx2[23] + Gx1[43]*Gx2[33] + Gx1[44]*Gx2[43] + Gx1[45]*Gx2[53] + Gx1[46]*Gx2[63] + Gx1[47]*Gx2[73] + Gx1[48]*Gx2[83] + Gx1[49]*Gx2[93]; Gx3[44] = + Gx1[40]*Gx2[4] + Gx1[41]*Gx2[14] + Gx1[42]*Gx2[24] + Gx1[43]*Gx2[34] + Gx1[44]*Gx2[44] + Gx1[45]*Gx2[54] + Gx1[46]*Gx2[64] + Gx1[47]*Gx2[74] + Gx1[48]*Gx2[84] + Gx1[49]*Gx2[94]; Gx3[45] = + Gx1[40]*Gx2[5] + Gx1[41]*Gx2[15] + Gx1[42]*Gx2[25] + Gx1[43]*Gx2[35] + Gx1[44]*Gx2[45] + Gx1[45]*Gx2[55] + Gx1[46]*Gx2[65] + Gx1[47]*Gx2[75] + Gx1[48]*Gx2[85] + Gx1[49]*Gx2[95]; Gx3[46] = + Gx1[40]*Gx2[6] + Gx1[41]*Gx2[16] + Gx1[42]*Gx2[26] + Gx1[43]*Gx2[36] + Gx1[44]*Gx2[46] + Gx1[45]*Gx2[56] + Gx1[46]*Gx2[66] + Gx1[47]*Gx2[76] + Gx1[48]*Gx2[86] + Gx1[49]*Gx2[96]; Gx3[47] = + Gx1[40]*Gx2[7] + Gx1[41]*Gx2[17] + Gx1[42]*Gx2[27] + Gx1[43]*Gx2[37] + Gx1[44]*Gx2[47] + Gx1[45]*Gx2[57] + Gx1[46]*Gx2[67] + Gx1[47]*Gx2[77] + Gx1[48]*Gx2[87] + Gx1[49]*Gx2[97]; Gx3[48] = + Gx1[40]*Gx2[8] + Gx1[41]*Gx2[18] + Gx1[42]*Gx2[28] + Gx1[43]*Gx2[38] + Gx1[44]*Gx2[48] + Gx1[45]*Gx2[58] + Gx1[46]*Gx2[68] + Gx1[47]*Gx2[78] + Gx1[48]*Gx2[88] + Gx1[49]*Gx2[98]; Gx3[49] = + Gx1[40]*Gx2[9] + Gx1[41]*Gx2[19] + Gx1[42]*Gx2[29] + Gx1[43]*Gx2[39] + Gx1[44]*Gx2[49] + Gx1[45]*Gx2[59] + Gx1[46]*Gx2[69] + Gx1[47]*Gx2[79] + Gx1[48]*Gx2[89] + Gx1[49]*Gx2[99]; Gx3[50] = 
+ Gx1[50]*Gx2[0] + Gx1[51]*Gx2[10] + Gx1[52]*Gx2[20] + Gx1[53]*Gx2[30] + Gx1[54]*Gx2[40] + Gx1[55]*Gx2[50] + Gx1[56]*Gx2[60] + Gx1[57]*Gx2[70] + Gx1[58]*Gx2[80] + Gx1[59]*Gx2[90]; Gx3[51] = + Gx1[50]*Gx2[1] + Gx1[51]*Gx2[11] + Gx1[52]*Gx2[21] + Gx1[53]*Gx2[31] + Gx1[54]*Gx2[41] + Gx1[55]*Gx2[51] + Gx1[56]*Gx2[61] + Gx1[57]*Gx2[71] + Gx1[58]*Gx2[81] + Gx1[59]*Gx2[91]; Gx3[52] = + Gx1[50]*Gx2[2] + Gx1[51]*Gx2[12] + Gx1[52]*Gx2[22] + Gx1[53]*Gx2[32] + Gx1[54]*Gx2[42] + Gx1[55]*Gx2[52] + Gx1[56]*Gx2[62] + Gx1[57]*Gx2[72] + Gx1[58]*Gx2[82] + Gx1[59]*Gx2[92]; Gx3[53] = + Gx1[50]*Gx2[3] + Gx1[51]*Gx2[13] + Gx1[52]*Gx2[23] + Gx1[53]*Gx2[33] + Gx1[54]*Gx2[43] + Gx1[55]*Gx2[53] + Gx1[56]*Gx2[63] + Gx1[57]*Gx2[73] + Gx1[58]*Gx2[83] + Gx1[59]*Gx2[93]; Gx3[54] = + Gx1[50]*Gx2[4] + Gx1[51]*Gx2[14] + Gx1[52]*Gx2[24] + Gx1[53]*Gx2[34] + Gx1[54]*Gx2[44] + Gx1[55]*Gx2[54] + Gx1[56]*Gx2[64] + Gx1[57]*Gx2[74] + Gx1[58]*Gx2[84] + Gx1[59]*Gx2[94]; Gx3[55] = + Gx1[50]*Gx2[5] + Gx1[51]*Gx2[15] + Gx1[52]*Gx2[25] + Gx1[53]*Gx2[35] + Gx1[54]*Gx2[45] + Gx1[55]*Gx2[55] + Gx1[56]*Gx2[65] + Gx1[57]*Gx2[75] + Gx1[58]*Gx2[85] + Gx1[59]*Gx2[95]; Gx3[56] = + Gx1[50]*Gx2[6] + Gx1[51]*Gx2[16] + Gx1[52]*Gx2[26] + Gx1[53]*Gx2[36] + Gx1[54]*Gx2[46] + Gx1[55]*Gx2[56] + Gx1[56]*Gx2[66] + Gx1[57]*Gx2[76] + Gx1[58]*Gx2[86] + Gx1[59]*Gx2[96]; Gx3[57] = + Gx1[50]*Gx2[7] + Gx1[51]*Gx2[17] + Gx1[52]*Gx2[27] + Gx1[53]*Gx2[37] + Gx1[54]*Gx2[47] + Gx1[55]*Gx2[57] + Gx1[56]*Gx2[67] + Gx1[57]*Gx2[77] + Gx1[58]*Gx2[87] + Gx1[59]*Gx2[97]; Gx3[58] = + Gx1[50]*Gx2[8] + Gx1[51]*Gx2[18] + Gx1[52]*Gx2[28] + Gx1[53]*Gx2[38] + Gx1[54]*Gx2[48] + Gx1[55]*Gx2[58] + Gx1[56]*Gx2[68] + Gx1[57]*Gx2[78] + Gx1[58]*Gx2[88] + Gx1[59]*Gx2[98]; Gx3[59] = + Gx1[50]*Gx2[9] + Gx1[51]*Gx2[19] + Gx1[52]*Gx2[29] + Gx1[53]*Gx2[39] + Gx1[54]*Gx2[49] + Gx1[55]*Gx2[59] + Gx1[56]*Gx2[69] + Gx1[57]*Gx2[79] + Gx1[58]*Gx2[89] + Gx1[59]*Gx2[99]; Gx3[60] = + Gx1[60]*Gx2[0] + Gx1[61]*Gx2[10] + Gx1[62]*Gx2[20] + Gx1[63]*Gx2[30] + Gx1[64]*Gx2[40] + 
Gx1[65]*Gx2[50] + Gx1[66]*Gx2[60] + Gx1[67]*Gx2[70] + Gx1[68]*Gx2[80] + Gx1[69]*Gx2[90]; Gx3[61] = + Gx1[60]*Gx2[1] + Gx1[61]*Gx2[11] + Gx1[62]*Gx2[21] + Gx1[63]*Gx2[31] + Gx1[64]*Gx2[41] + Gx1[65]*Gx2[51] + Gx1[66]*Gx2[61] + Gx1[67]*Gx2[71] + Gx1[68]*Gx2[81] + Gx1[69]*Gx2[91]; Gx3[62] = + Gx1[60]*Gx2[2] + Gx1[61]*Gx2[12] + Gx1[62]*Gx2[22] + Gx1[63]*Gx2[32] + Gx1[64]*Gx2[42] + Gx1[65]*Gx2[52] + Gx1[66]*Gx2[62] + Gx1[67]*Gx2[72] + Gx1[68]*Gx2[82] + Gx1[69]*Gx2[92]; Gx3[63] = + Gx1[60]*Gx2[3] + Gx1[61]*Gx2[13] + Gx1[62]*Gx2[23] + Gx1[63]*Gx2[33] + Gx1[64]*Gx2[43] + Gx1[65]*Gx2[53] + Gx1[66]*Gx2[63] + Gx1[67]*Gx2[73] + Gx1[68]*Gx2[83] + Gx1[69]*Gx2[93]; Gx3[64] = + Gx1[60]*Gx2[4] + Gx1[61]*Gx2[14] + Gx1[62]*Gx2[24] + Gx1[63]*Gx2[34] + Gx1[64]*Gx2[44] + Gx1[65]*Gx2[54] + Gx1[66]*Gx2[64] + Gx1[67]*Gx2[74] + Gx1[68]*Gx2[84] + Gx1[69]*Gx2[94]; Gx3[65] = + Gx1[60]*Gx2[5] + Gx1[61]*Gx2[15] + Gx1[62]*Gx2[25] + Gx1[63]*Gx2[35] + Gx1[64]*Gx2[45] + Gx1[65]*Gx2[55] + Gx1[66]*Gx2[65] + Gx1[67]*Gx2[75] + Gx1[68]*Gx2[85] + Gx1[69]*Gx2[95]; Gx3[66] = + Gx1[60]*Gx2[6] + Gx1[61]*Gx2[16] + Gx1[62]*Gx2[26] + Gx1[63]*Gx2[36] + Gx1[64]*Gx2[46] + Gx1[65]*Gx2[56] + Gx1[66]*Gx2[66] + Gx1[67]*Gx2[76] + Gx1[68]*Gx2[86] + Gx1[69]*Gx2[96]; Gx3[67] = + Gx1[60]*Gx2[7] + Gx1[61]*Gx2[17] + Gx1[62]*Gx2[27] + Gx1[63]*Gx2[37] + Gx1[64]*Gx2[47] + Gx1[65]*Gx2[57] + Gx1[66]*Gx2[67] + Gx1[67]*Gx2[77] + Gx1[68]*Gx2[87] + Gx1[69]*Gx2[97]; Gx3[68] = + Gx1[60]*Gx2[8] + Gx1[61]*Gx2[18] + Gx1[62]*Gx2[28] + Gx1[63]*Gx2[38] + Gx1[64]*Gx2[48] + Gx1[65]*Gx2[58] + Gx1[66]*Gx2[68] + Gx1[67]*Gx2[78] + Gx1[68]*Gx2[88] + Gx1[69]*Gx2[98]; Gx3[69] = + Gx1[60]*Gx2[9] + Gx1[61]*Gx2[19] + Gx1[62]*Gx2[29] + Gx1[63]*Gx2[39] + Gx1[64]*Gx2[49] + Gx1[65]*Gx2[59] + Gx1[66]*Gx2[69] + Gx1[67]*Gx2[79] + Gx1[68]*Gx2[89] + Gx1[69]*Gx2[99]; Gx3[70] = + Gx1[70]*Gx2[0] + Gx1[71]*Gx2[10] + Gx1[72]*Gx2[20] + Gx1[73]*Gx2[30] + Gx1[74]*Gx2[40] + Gx1[75]*Gx2[50] + Gx1[76]*Gx2[60] + Gx1[77]*Gx2[70] + Gx1[78]*Gx2[80] + Gx1[79]*Gx2[90]; Gx3[71] = 
+ Gx1[70]*Gx2[1] + Gx1[71]*Gx2[11] + Gx1[72]*Gx2[21] + Gx1[73]*Gx2[31] + Gx1[74]*Gx2[41] + Gx1[75]*Gx2[51] + Gx1[76]*Gx2[61] + Gx1[77]*Gx2[71] + Gx1[78]*Gx2[81] + Gx1[79]*Gx2[91]; Gx3[72] = + Gx1[70]*Gx2[2] + Gx1[71]*Gx2[12] + Gx1[72]*Gx2[22] + Gx1[73]*Gx2[32] + Gx1[74]*Gx2[42] + Gx1[75]*Gx2[52] + Gx1[76]*Gx2[62] + Gx1[77]*Gx2[72] + Gx1[78]*Gx2[82] + Gx1[79]*Gx2[92]; Gx3[73] = + Gx1[70]*Gx2[3] + Gx1[71]*Gx2[13] + Gx1[72]*Gx2[23] + Gx1[73]*Gx2[33] + Gx1[74]*Gx2[43] + Gx1[75]*Gx2[53] + Gx1[76]*Gx2[63] + Gx1[77]*Gx2[73] + Gx1[78]*Gx2[83] + Gx1[79]*Gx2[93]; Gx3[74] = + Gx1[70]*Gx2[4] + Gx1[71]*Gx2[14] + Gx1[72]*Gx2[24] + Gx1[73]*Gx2[34] + Gx1[74]*Gx2[44] + Gx1[75]*Gx2[54] + Gx1[76]*Gx2[64] + Gx1[77]*Gx2[74] + Gx1[78]*Gx2[84] + Gx1[79]*Gx2[94]; Gx3[75] = + Gx1[70]*Gx2[5] + Gx1[71]*Gx2[15] + Gx1[72]*Gx2[25] + Gx1[73]*Gx2[35] + Gx1[74]*Gx2[45] + Gx1[75]*Gx2[55] + Gx1[76]*Gx2[65] + Gx1[77]*Gx2[75] + Gx1[78]*Gx2[85] + Gx1[79]*Gx2[95]; Gx3[76] = + Gx1[70]*Gx2[6] + Gx1[71]*Gx2[16] + Gx1[72]*Gx2[26] + Gx1[73]*Gx2[36] + Gx1[74]*Gx2[46] + Gx1[75]*Gx2[56] + Gx1[76]*Gx2[66] + Gx1[77]*Gx2[76] + Gx1[78]*Gx2[86] + Gx1[79]*Gx2[96]; Gx3[77] = + Gx1[70]*Gx2[7] + Gx1[71]*Gx2[17] + Gx1[72]*Gx2[27] + Gx1[73]*Gx2[37] + Gx1[74]*Gx2[47] + Gx1[75]*Gx2[57] + Gx1[76]*Gx2[67] + Gx1[77]*Gx2[77] + Gx1[78]*Gx2[87] + Gx1[79]*Gx2[97]; Gx3[78] = + Gx1[70]*Gx2[8] + Gx1[71]*Gx2[18] + Gx1[72]*Gx2[28] + Gx1[73]*Gx2[38] + Gx1[74]*Gx2[48] + Gx1[75]*Gx2[58] + Gx1[76]*Gx2[68] + Gx1[77]*Gx2[78] + Gx1[78]*Gx2[88] + Gx1[79]*Gx2[98]; Gx3[79] = + Gx1[70]*Gx2[9] + Gx1[71]*Gx2[19] + Gx1[72]*Gx2[29] + Gx1[73]*Gx2[39] + Gx1[74]*Gx2[49] + Gx1[75]*Gx2[59] + Gx1[76]*Gx2[69] + Gx1[77]*Gx2[79] + Gx1[78]*Gx2[89] + Gx1[79]*Gx2[99]; Gx3[80] = + Gx1[80]*Gx2[0] + Gx1[81]*Gx2[10] + Gx1[82]*Gx2[20] + Gx1[83]*Gx2[30] + Gx1[84]*Gx2[40] + Gx1[85]*Gx2[50] + Gx1[86]*Gx2[60] + Gx1[87]*Gx2[70] + Gx1[88]*Gx2[80] + Gx1[89]*Gx2[90]; Gx3[81] = + Gx1[80]*Gx2[1] + Gx1[81]*Gx2[11] + Gx1[82]*Gx2[21] + Gx1[83]*Gx2[31] + Gx1[84]*Gx2[41] + 
Gx1[85]*Gx2[51] + Gx1[86]*Gx2[61] + Gx1[87]*Gx2[71] + Gx1[88]*Gx2[81] + Gx1[89]*Gx2[91]; Gx3[82] = + Gx1[80]*Gx2[2] + Gx1[81]*Gx2[12] + Gx1[82]*Gx2[22] + Gx1[83]*Gx2[32] + Gx1[84]*Gx2[42] + Gx1[85]*Gx2[52] + Gx1[86]*Gx2[62] + Gx1[87]*Gx2[72] + Gx1[88]*Gx2[82] + Gx1[89]*Gx2[92]; Gx3[83] = + Gx1[80]*Gx2[3] + Gx1[81]*Gx2[13] + Gx1[82]*Gx2[23] + Gx1[83]*Gx2[33] + Gx1[84]*Gx2[43] + Gx1[85]*Gx2[53] + Gx1[86]*Gx2[63] + Gx1[87]*Gx2[73] + Gx1[88]*Gx2[83] + Gx1[89]*Gx2[93]; Gx3[84] = + Gx1[80]*Gx2[4] + Gx1[81]*Gx2[14] + Gx1[82]*Gx2[24] + Gx1[83]*Gx2[34] + Gx1[84]*Gx2[44] + Gx1[85]*Gx2[54] + Gx1[86]*Gx2[64] + Gx1[87]*Gx2[74] + Gx1[88]*Gx2[84] + Gx1[89]*Gx2[94]; Gx3[85] = + Gx1[80]*Gx2[5] + Gx1[81]*Gx2[15] + Gx1[82]*Gx2[25] + Gx1[83]*Gx2[35] + Gx1[84]*Gx2[45] + Gx1[85]*Gx2[55] + Gx1[86]*Gx2[65] + Gx1[87]*Gx2[75] + Gx1[88]*Gx2[85] + Gx1[89]*Gx2[95]; Gx3[86] = + Gx1[80]*Gx2[6] + Gx1[81]*Gx2[16] + Gx1[82]*Gx2[26] + Gx1[83]*Gx2[36] + Gx1[84]*Gx2[46] + Gx1[85]*Gx2[56] + Gx1[86]*Gx2[66] + Gx1[87]*Gx2[76] + Gx1[88]*Gx2[86] + Gx1[89]*Gx2[96]; Gx3[87] = + Gx1[80]*Gx2[7] + Gx1[81]*Gx2[17] + Gx1[82]*Gx2[27] + Gx1[83]*Gx2[37] + Gx1[84]*Gx2[47] + Gx1[85]*Gx2[57] + Gx1[86]*Gx2[67] + Gx1[87]*Gx2[77] + Gx1[88]*Gx2[87] + Gx1[89]*Gx2[97]; Gx3[88] = + Gx1[80]*Gx2[8] + Gx1[81]*Gx2[18] + Gx1[82]*Gx2[28] + Gx1[83]*Gx2[38] + Gx1[84]*Gx2[48] + Gx1[85]*Gx2[58] + Gx1[86]*Gx2[68] + Gx1[87]*Gx2[78] + Gx1[88]*Gx2[88] + Gx1[89]*Gx2[98]; Gx3[89] = + Gx1[80]*Gx2[9] + Gx1[81]*Gx2[19] + Gx1[82]*Gx2[29] + Gx1[83]*Gx2[39] + Gx1[84]*Gx2[49] + Gx1[85]*Gx2[59] + Gx1[86]*Gx2[69] + Gx1[87]*Gx2[79] + Gx1[88]*Gx2[89] + Gx1[89]*Gx2[99]; Gx3[90] = + Gx1[90]*Gx2[0] + Gx1[91]*Gx2[10] + Gx1[92]*Gx2[20] + Gx1[93]*Gx2[30] + Gx1[94]*Gx2[40] + Gx1[95]*Gx2[50] + Gx1[96]*Gx2[60] + Gx1[97]*Gx2[70] + Gx1[98]*Gx2[80] + Gx1[99]*Gx2[90]; Gx3[91] = + Gx1[90]*Gx2[1] + Gx1[91]*Gx2[11] + Gx1[92]*Gx2[21] + Gx1[93]*Gx2[31] + Gx1[94]*Gx2[41] + Gx1[95]*Gx2[51] + Gx1[96]*Gx2[61] + Gx1[97]*Gx2[71] + Gx1[98]*Gx2[81] + Gx1[99]*Gx2[91]; Gx3[92] = 
+ Gx1[90]*Gx2[2] + Gx1[91]*Gx2[12] + Gx1[92]*Gx2[22] + Gx1[93]*Gx2[32] + Gx1[94]*Gx2[42] + Gx1[95]*Gx2[52] + Gx1[96]*Gx2[62] + Gx1[97]*Gx2[72] + Gx1[98]*Gx2[82] + Gx1[99]*Gx2[92]; Gx3[93] = + Gx1[90]*Gx2[3] + Gx1[91]*Gx2[13] + Gx1[92]*Gx2[23] + Gx1[93]*Gx2[33] + Gx1[94]*Gx2[43] + Gx1[95]*Gx2[53] + Gx1[96]*Gx2[63] + Gx1[97]*Gx2[73] + Gx1[98]*Gx2[83] + Gx1[99]*Gx2[93]; Gx3[94] = + Gx1[90]*Gx2[4] + Gx1[91]*Gx2[14] + Gx1[92]*Gx2[24] + Gx1[93]*Gx2[34] + Gx1[94]*Gx2[44] + Gx1[95]*Gx2[54] + Gx1[96]*Gx2[64] + Gx1[97]*Gx2[74] + Gx1[98]*Gx2[84] + Gx1[99]*Gx2[94]; Gx3[95] = + Gx1[90]*Gx2[5] + Gx1[91]*Gx2[15] + Gx1[92]*Gx2[25] + Gx1[93]*Gx2[35] + Gx1[94]*Gx2[45] + Gx1[95]*Gx2[55] + Gx1[96]*Gx2[65] + Gx1[97]*Gx2[75] + Gx1[98]*Gx2[85] + Gx1[99]*Gx2[95]; Gx3[96] = + Gx1[90]*Gx2[6] + Gx1[91]*Gx2[16] + Gx1[92]*Gx2[26] + Gx1[93]*Gx2[36] + Gx1[94]*Gx2[46] + Gx1[95]*Gx2[56] + Gx1[96]*Gx2[66] + Gx1[97]*Gx2[76] + Gx1[98]*Gx2[86] + Gx1[99]*Gx2[96]; Gx3[97] = + Gx1[90]*Gx2[7] + Gx1[91]*Gx2[17] + Gx1[92]*Gx2[27] + Gx1[93]*Gx2[37] + Gx1[94]*Gx2[47] + Gx1[95]*Gx2[57] + Gx1[96]*Gx2[67] + Gx1[97]*Gx2[77] + Gx1[98]*Gx2[87] + Gx1[99]*Gx2[97]; Gx3[98] = + Gx1[90]*Gx2[8] + Gx1[91]*Gx2[18] + Gx1[92]*Gx2[28] + Gx1[93]*Gx2[38] + Gx1[94]*Gx2[48] + Gx1[95]*Gx2[58] + Gx1[96]*Gx2[68] + Gx1[97]*Gx2[78] + Gx1[98]*Gx2[88] + Gx1[99]*Gx2[98]; Gx3[99] = + Gx1[90]*Gx2[9] + Gx1[91]*Gx2[19] + Gx1[92]*Gx2[29] + Gx1[93]*Gx2[39] + Gx1[94]*Gx2[49] + Gx1[95]*Gx2[59] + Gx1[96]*Gx2[69] + Gx1[97]*Gx2[79] + Gx1[98]*Gx2[89] + Gx1[99]*Gx2[99]; } void acado_multGxGu( real_t* const Gx1, real_t* const Gu1, real_t* const Gu2 ) { Gu2[0] = + Gx1[0]*Gu1[0] + Gx1[1]*Gu1[4] + Gx1[2]*Gu1[8] + Gx1[3]*Gu1[12] + Gx1[4]*Gu1[16] + Gx1[5]*Gu1[20] + Gx1[6]*Gu1[24] + Gx1[7]*Gu1[28] + Gx1[8]*Gu1[32] + Gx1[9]*Gu1[36]; Gu2[1] = + Gx1[0]*Gu1[1] + Gx1[1]*Gu1[5] + Gx1[2]*Gu1[9] + Gx1[3]*Gu1[13] + Gx1[4]*Gu1[17] + Gx1[5]*Gu1[21] + Gx1[6]*Gu1[25] + Gx1[7]*Gu1[29] + Gx1[8]*Gu1[33] + Gx1[9]*Gu1[37]; Gu2[2] = + Gx1[0]*Gu1[2] + Gx1[1]*Gu1[6] + 
Gx1[2]*Gu1[10] + Gx1[3]*Gu1[14] + Gx1[4]*Gu1[18] + Gx1[5]*Gu1[22] + Gx1[6]*Gu1[26] + Gx1[7]*Gu1[30] + Gx1[8]*Gu1[34] + Gx1[9]*Gu1[38]; Gu2[3] = + Gx1[0]*Gu1[3] + Gx1[1]*Gu1[7] + Gx1[2]*Gu1[11] + Gx1[3]*Gu1[15] + Gx1[4]*Gu1[19] + Gx1[5]*Gu1[23] + Gx1[6]*Gu1[27] + Gx1[7]*Gu1[31] + Gx1[8]*Gu1[35] + Gx1[9]*Gu1[39]; Gu2[4] = + Gx1[10]*Gu1[0] + Gx1[11]*Gu1[4] + Gx1[12]*Gu1[8] + Gx1[13]*Gu1[12] + Gx1[14]*Gu1[16] + Gx1[15]*Gu1[20] + Gx1[16]*Gu1[24] + Gx1[17]*Gu1[28] + Gx1[18]*Gu1[32] + Gx1[19]*Gu1[36]; Gu2[5] = + Gx1[10]*Gu1[1] + Gx1[11]*Gu1[5] + Gx1[12]*Gu1[9] + Gx1[13]*Gu1[13] + Gx1[14]*Gu1[17] + Gx1[15]*Gu1[21] + Gx1[16]*Gu1[25] + Gx1[17]*Gu1[29] + Gx1[18]*Gu1[33] + Gx1[19]*Gu1[37]; Gu2[6] = + Gx1[10]*Gu1[2] + Gx1[11]*Gu1[6] + Gx1[12]*Gu1[10] + Gx1[13]*Gu1[14] + Gx1[14]*Gu1[18] + Gx1[15]*Gu1[22] + Gx1[16]*Gu1[26] + Gx1[17]*Gu1[30] + Gx1[18]*Gu1[34] + Gx1[19]*Gu1[38]; Gu2[7] = + Gx1[10]*Gu1[3] + Gx1[11]*Gu1[7] + Gx1[12]*Gu1[11] + Gx1[13]*Gu1[15] + Gx1[14]*Gu1[19] + Gx1[15]*Gu1[23] + Gx1[16]*Gu1[27] + Gx1[17]*Gu1[31] + Gx1[18]*Gu1[35] + Gx1[19]*Gu1[39]; Gu2[8] = + Gx1[20]*Gu1[0] + Gx1[21]*Gu1[4] + Gx1[22]*Gu1[8] + Gx1[23]*Gu1[12] + Gx1[24]*Gu1[16] + Gx1[25]*Gu1[20] + Gx1[26]*Gu1[24] + Gx1[27]*Gu1[28] + Gx1[28]*Gu1[32] + Gx1[29]*Gu1[36]; Gu2[9] = + Gx1[20]*Gu1[1] + Gx1[21]*Gu1[5] + Gx1[22]*Gu1[9] + Gx1[23]*Gu1[13] + Gx1[24]*Gu1[17] + Gx1[25]*Gu1[21] + Gx1[26]*Gu1[25] + Gx1[27]*Gu1[29] + Gx1[28]*Gu1[33] + Gx1[29]*Gu1[37]; Gu2[10] = + Gx1[20]*Gu1[2] + Gx1[21]*Gu1[6] + Gx1[22]*Gu1[10] + Gx1[23]*Gu1[14] + Gx1[24]*Gu1[18] + Gx1[25]*Gu1[22] + Gx1[26]*Gu1[26] + Gx1[27]*Gu1[30] + Gx1[28]*Gu1[34] + Gx1[29]*Gu1[38]; Gu2[11] = + Gx1[20]*Gu1[3] + Gx1[21]*Gu1[7] + Gx1[22]*Gu1[11] + Gx1[23]*Gu1[15] + Gx1[24]*Gu1[19] + Gx1[25]*Gu1[23] + Gx1[26]*Gu1[27] + Gx1[27]*Gu1[31] + Gx1[28]*Gu1[35] + Gx1[29]*Gu1[39]; Gu2[12] = + Gx1[30]*Gu1[0] + Gx1[31]*Gu1[4] + Gx1[32]*Gu1[8] + Gx1[33]*Gu1[12] + Gx1[34]*Gu1[16] + Gx1[35]*Gu1[20] + Gx1[36]*Gu1[24] + Gx1[37]*Gu1[28] + Gx1[38]*Gu1[32] + 
Gx1[39]*Gu1[36]; Gu2[13] = + Gx1[30]*Gu1[1] + Gx1[31]*Gu1[5] + Gx1[32]*Gu1[9] + Gx1[33]*Gu1[13] + Gx1[34]*Gu1[17] + Gx1[35]*Gu1[21] + Gx1[36]*Gu1[25] + Gx1[37]*Gu1[29] + Gx1[38]*Gu1[33] + Gx1[39]*Gu1[37]; Gu2[14] = + Gx1[30]*Gu1[2] + Gx1[31]*Gu1[6] + Gx1[32]*Gu1[10] + Gx1[33]*Gu1[14] + Gx1[34]*Gu1[18] + Gx1[35]*Gu1[22] + Gx1[36]*Gu1[26] + Gx1[37]*Gu1[30] + Gx1[38]*Gu1[34] + Gx1[39]*Gu1[38]; Gu2[15] = + Gx1[30]*Gu1[3] + Gx1[31]*Gu1[7] + Gx1[32]*Gu1[11] + Gx1[33]*Gu1[15] + Gx1[34]*Gu1[19] + Gx1[35]*Gu1[23] + Gx1[36]*Gu1[27] + Gx1[37]*Gu1[31] + Gx1[38]*Gu1[35] + Gx1[39]*Gu1[39]; Gu2[16] = + Gx1[40]*Gu1[0] + Gx1[41]*Gu1[4] + Gx1[42]*Gu1[8] + Gx1[43]*Gu1[12] + Gx1[44]*Gu1[16] + Gx1[45]*Gu1[20] + Gx1[46]*Gu1[24] + Gx1[47]*Gu1[28] + Gx1[48]*Gu1[32] + Gx1[49]*Gu1[36]; Gu2[17] = + Gx1[40]*Gu1[1] + Gx1[41]*Gu1[5] + Gx1[42]*Gu1[9] + Gx1[43]*Gu1[13] + Gx1[44]*Gu1[17] + Gx1[45]*Gu1[21] + Gx1[46]*Gu1[25] + Gx1[47]*Gu1[29] + Gx1[48]*Gu1[33] + Gx1[49]*Gu1[37]; Gu2[18] = + Gx1[40]*Gu1[2] + Gx1[41]*Gu1[6] + Gx1[42]*Gu1[10] + Gx1[43]*Gu1[14] + Gx1[44]*Gu1[18] + Gx1[45]*Gu1[22] + Gx1[46]*Gu1[26] + Gx1[47]*Gu1[30] + Gx1[48]*Gu1[34] + Gx1[49]*Gu1[38]; Gu2[19] = + Gx1[40]*Gu1[3] + Gx1[41]*Gu1[7] + Gx1[42]*Gu1[11] + Gx1[43]*Gu1[15] + Gx1[44]*Gu1[19] + Gx1[45]*Gu1[23] + Gx1[46]*Gu1[27] + Gx1[47]*Gu1[31] + Gx1[48]*Gu1[35] + Gx1[49]*Gu1[39]; Gu2[20] = + Gx1[50]*Gu1[0] + Gx1[51]*Gu1[4] + Gx1[52]*Gu1[8] + Gx1[53]*Gu1[12] + Gx1[54]*Gu1[16] + Gx1[55]*Gu1[20] + Gx1[56]*Gu1[24] + Gx1[57]*Gu1[28] + Gx1[58]*Gu1[32] + Gx1[59]*Gu1[36]; Gu2[21] = + Gx1[50]*Gu1[1] + Gx1[51]*Gu1[5] + Gx1[52]*Gu1[9] + Gx1[53]*Gu1[13] + Gx1[54]*Gu1[17] + Gx1[55]*Gu1[21] + Gx1[56]*Gu1[25] + Gx1[57]*Gu1[29] + Gx1[58]*Gu1[33] + Gx1[59]*Gu1[37]; Gu2[22] = + Gx1[50]*Gu1[2] + Gx1[51]*Gu1[6] + Gx1[52]*Gu1[10] + Gx1[53]*Gu1[14] + Gx1[54]*Gu1[18] + Gx1[55]*Gu1[22] + Gx1[56]*Gu1[26] + Gx1[57]*Gu1[30] + Gx1[58]*Gu1[34] + Gx1[59]*Gu1[38]; Gu2[23] = + Gx1[50]*Gu1[3] + Gx1[51]*Gu1[7] + Gx1[52]*Gu1[11] + Gx1[53]*Gu1[15] + Gx1[54]*Gu1[19] 
+ Gx1[55]*Gu1[23] + Gx1[56]*Gu1[27] + Gx1[57]*Gu1[31] + Gx1[58]*Gu1[35] + Gx1[59]*Gu1[39]; Gu2[24] = + Gx1[60]*Gu1[0] + Gx1[61]*Gu1[4] + Gx1[62]*Gu1[8] + Gx1[63]*Gu1[12] + Gx1[64]*Gu1[16] + Gx1[65]*Gu1[20] + Gx1[66]*Gu1[24] + Gx1[67]*Gu1[28] + Gx1[68]*Gu1[32] + Gx1[69]*Gu1[36]; Gu2[25] = + Gx1[60]*Gu1[1] + Gx1[61]*Gu1[5] + Gx1[62]*Gu1[9] + Gx1[63]*Gu1[13] + Gx1[64]*Gu1[17] + Gx1[65]*Gu1[21] + Gx1[66]*Gu1[25] + Gx1[67]*Gu1[29] + Gx1[68]*Gu1[33] + Gx1[69]*Gu1[37]; Gu2[26] = + Gx1[60]*Gu1[2] + Gx1[61]*Gu1[6] + Gx1[62]*Gu1[10] + Gx1[63]*Gu1[14] + Gx1[64]*Gu1[18] + Gx1[65]*Gu1[22] + Gx1[66]*Gu1[26] + Gx1[67]*Gu1[30] + Gx1[68]*Gu1[34] + Gx1[69]*Gu1[38]; Gu2[27] = + Gx1[60]*Gu1[3] + Gx1[61]*Gu1[7] + Gx1[62]*Gu1[11] + Gx1[63]*Gu1[15] + Gx1[64]*Gu1[19] + Gx1[65]*Gu1[23] + Gx1[66]*Gu1[27] + Gx1[67]*Gu1[31] + Gx1[68]*Gu1[35] + Gx1[69]*Gu1[39]; Gu2[28] = + Gx1[70]*Gu1[0] + Gx1[71]*Gu1[4] + Gx1[72]*Gu1[8] + Gx1[73]*Gu1[12] + Gx1[74]*Gu1[16] + Gx1[75]*Gu1[20] + Gx1[76]*Gu1[24] + Gx1[77]*Gu1[28] + Gx1[78]*Gu1[32] + Gx1[79]*Gu1[36]; Gu2[29] = + Gx1[70]*Gu1[1] + Gx1[71]*Gu1[5] + Gx1[72]*Gu1[9] + Gx1[73]*Gu1[13] + Gx1[74]*Gu1[17] + Gx1[75]*Gu1[21] + Gx1[76]*Gu1[25] + Gx1[77]*Gu1[29] + Gx1[78]*Gu1[33] + Gx1[79]*Gu1[37]; Gu2[30] = + Gx1[70]*Gu1[2] + Gx1[71]*Gu1[6] + Gx1[72]*Gu1[10] + Gx1[73]*Gu1[14] + Gx1[74]*Gu1[18] + Gx1[75]*Gu1[22] + Gx1[76]*Gu1[26] + Gx1[77]*Gu1[30] + Gx1[78]*Gu1[34] + Gx1[79]*Gu1[38]; Gu2[31] = + Gx1[70]*Gu1[3] + Gx1[71]*Gu1[7] + Gx1[72]*Gu1[11] + Gx1[73]*Gu1[15] + Gx1[74]*Gu1[19] + Gx1[75]*Gu1[23] + Gx1[76]*Gu1[27] + Gx1[77]*Gu1[31] + Gx1[78]*Gu1[35] + Gx1[79]*Gu1[39]; Gu2[32] = + Gx1[80]*Gu1[0] + Gx1[81]*Gu1[4] + Gx1[82]*Gu1[8] + Gx1[83]*Gu1[12] + Gx1[84]*Gu1[16] + Gx1[85]*Gu1[20] + Gx1[86]*Gu1[24] + Gx1[87]*Gu1[28] + Gx1[88]*Gu1[32] + Gx1[89]*Gu1[36]; Gu2[33] = + Gx1[80]*Gu1[1] + Gx1[81]*Gu1[5] + Gx1[82]*Gu1[9] + Gx1[83]*Gu1[13] + Gx1[84]*Gu1[17] + Gx1[85]*Gu1[21] + Gx1[86]*Gu1[25] + Gx1[87]*Gu1[29] + Gx1[88]*Gu1[33] + Gx1[89]*Gu1[37]; Gu2[34] = + 
/* Continuation of acado_multGxGu: completes Gu2[34], Gu2[35] and the last
 * block row Gu2[36..39] (row 9 of Gx1 times Gu1), then closes the function.
 * acado_moveGuE is a plain 40-entry copy of a 10x4 input-sensitivity block. */
Gx1[80]*Gu1[2] + Gx1[81]*Gu1[6] + Gx1[82]*Gu1[10] + Gx1[83]*Gu1[14] + Gx1[84]*Gu1[18] + Gx1[85]*Gu1[22] + Gx1[86]*Gu1[26] + Gx1[87]*Gu1[30] + Gx1[88]*Gu1[34] + Gx1[89]*Gu1[38]; Gu2[35] = + Gx1[80]*Gu1[3] + Gx1[81]*Gu1[7] + Gx1[82]*Gu1[11] + Gx1[83]*Gu1[15] + Gx1[84]*Gu1[19] + Gx1[85]*Gu1[23] + Gx1[86]*Gu1[27] + Gx1[87]*Gu1[31] + Gx1[88]*Gu1[35] + Gx1[89]*Gu1[39]; Gu2[36] = + Gx1[90]*Gu1[0] + Gx1[91]*Gu1[4] + Gx1[92]*Gu1[8] + Gx1[93]*Gu1[12] + Gx1[94]*Gu1[16] + Gx1[95]*Gu1[20] + Gx1[96]*Gu1[24] + Gx1[97]*Gu1[28] + Gx1[98]*Gu1[32] + Gx1[99]*Gu1[36]; Gu2[37] = + Gx1[90]*Gu1[1] + Gx1[91]*Gu1[5] + Gx1[92]*Gu1[9] + Gx1[93]*Gu1[13] + Gx1[94]*Gu1[17] + Gx1[95]*Gu1[21] + Gx1[96]*Gu1[25] + Gx1[97]*Gu1[29] + Gx1[98]*Gu1[33] + Gx1[99]*Gu1[37]; Gu2[38] = + Gx1[90]*Gu1[2] + Gx1[91]*Gu1[6] + Gx1[92]*Gu1[10] + Gx1[93]*Gu1[14] + Gx1[94]*Gu1[18] + Gx1[95]*Gu1[22] + Gx1[96]*Gu1[26] + Gx1[97]*Gu1[30] + Gx1[98]*Gu1[34] + Gx1[99]*Gu1[38]; Gu2[39] = + Gx1[90]*Gu1[3] + Gx1[91]*Gu1[7] + Gx1[92]*Gu1[11] + Gx1[93]*Gu1[15] + Gx1[94]*Gu1[19] + Gx1[95]*Gu1[23] + Gx1[96]*Gu1[27] + Gx1[97]*Gu1[31] + Gx1[98]*Gu1[35] + Gx1[99]*Gu1[39]; } void acado_moveGuE( real_t* const Gu1, real_t* const Gu2 ) { Gu2[0] = Gu1[0]; Gu2[1] = Gu1[1]; Gu2[2] = Gu1[2]; Gu2[3] = Gu1[3]; Gu2[4] = Gu1[4]; Gu2[5] = Gu1[5]; Gu2[6] = Gu1[6]; Gu2[7] = Gu1[7]; Gu2[8] = Gu1[8]; Gu2[9] = Gu1[9]; Gu2[10] = Gu1[10]; Gu2[11] = Gu1[11]; Gu2[12] = Gu1[12]; Gu2[13] = Gu1[13]; Gu2[14] = Gu1[14]; Gu2[15] = Gu1[15]; Gu2[16] = Gu1[16]; Gu2[17] = Gu1[17]; Gu2[18] = Gu1[18]; Gu2[19] = Gu1[19]; Gu2[20] = Gu1[20]; Gu2[21] = Gu1[21]; Gu2[22] = Gu1[22]; Gu2[23] = Gu1[23]; Gu2[24] = Gu1[24]; Gu2[25] = Gu1[25]; Gu2[26] = Gu1[26]; Gu2[27] = Gu1[27]; Gu2[28] = Gu1[28]; Gu2[29] = Gu1[29]; Gu2[30] = Gu1[30]; Gu2[31] = Gu1[31]; Gu2[32] = Gu1[32]; Gu2[33] = Gu1[33]; Gu2[34] = Gu1[34]; Gu2[35] = Gu1[35]; Gu2[36] = Gu1[36]; Gu2[37] = Gu1[37]; Gu2[38] = Gu1[38]; Gu2[39] = Gu1[39]; } void acado_multBTW1( real_t* const Gu1, real_t* const Gu2, int iRow, int
/* acado_multBTW1 writes the 4x4 product Gu1^T * Gu2 into the condensed
 * Hessian H at block (iRow, iCol); row stride of H is 80 (320/4 columns).
 * Its body continues past this chunk, so it is left untouched. */
iCol ) { acadoWorkspace.H[(iRow * 320) + (iCol * 4)] = + Gu1[0]*Gu2[0] + Gu1[4]*Gu2[4] + Gu1[8]*Gu2[8] + Gu1[12]*Gu2[12] + Gu1[16]*Gu2[16] + Gu1[20]*Gu2[20] + Gu1[24]*Gu2[24] + Gu1[28]*Gu2[28] + Gu1[32]*Gu2[32] + Gu1[36]*Gu2[36]; acadoWorkspace.H[(iRow * 320) + (iCol * 4 + 1)] = + Gu1[0]*Gu2[1] + Gu1[4]*Gu2[5] + Gu1[8]*Gu2[9] + Gu1[12]*Gu2[13] + Gu1[16]*Gu2[17] + Gu1[20]*Gu2[21] + Gu1[24]*Gu2[25] + Gu1[28]*Gu2[29] + Gu1[32]*Gu2[33] + Gu1[36]*Gu2[37]; acadoWorkspace.H[(iRow * 320) + (iCol * 4 + 2)] = + Gu1[0]*Gu2[2] + Gu1[4]*Gu2[6] + Gu1[8]*Gu2[10] + Gu1[12]*Gu2[14] + Gu1[16]*Gu2[18] + Gu1[20]*Gu2[22] + Gu1[24]*Gu2[26] + Gu1[28]*Gu2[30] + Gu1[32]*Gu2[34] + Gu1[36]*Gu2[38]; acadoWorkspace.H[(iRow * 320) + (iCol * 4 + 3)] = + Gu1[0]*Gu2[3] + Gu1[4]*Gu2[7] + Gu1[8]*Gu2[11] + Gu1[12]*Gu2[15] + Gu1[16]*Gu2[19] + Gu1[20]*Gu2[23] + Gu1[24]*Gu2[27] + Gu1[28]*Gu2[31] + Gu1[32]*Gu2[35] + Gu1[36]*Gu2[39]; acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4)] = + Gu1[1]*Gu2[0] + Gu1[5]*Gu2[4] + Gu1[9]*Gu2[8] + Gu1[13]*Gu2[12] + Gu1[17]*Gu2[16] + Gu1[21]*Gu2[20] + Gu1[25]*Gu2[24] + Gu1[29]*Gu2[28] + Gu1[33]*Gu2[32] + Gu1[37]*Gu2[36]; acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4 + 1)] = + Gu1[1]*Gu2[1] + Gu1[5]*Gu2[5] + Gu1[9]*Gu2[9] + Gu1[13]*Gu2[13] + Gu1[17]*Gu2[17] + Gu1[21]*Gu2[21] + Gu1[25]*Gu2[25] + Gu1[29]*Gu2[29] + Gu1[33]*Gu2[33] + Gu1[37]*Gu2[37]; acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4 + 2)] = + Gu1[1]*Gu2[2] + Gu1[5]*Gu2[6] + Gu1[9]*Gu2[10] + Gu1[13]*Gu2[14] + Gu1[17]*Gu2[18] + Gu1[21]*Gu2[22] + Gu1[25]*Gu2[26] + Gu1[29]*Gu2[30] + Gu1[33]*Gu2[34] + Gu1[37]*Gu2[38]; acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4 + 3)] = + Gu1[1]*Gu2[3] + Gu1[5]*Gu2[7] + Gu1[9]*Gu2[11] + Gu1[13]*Gu2[15] + Gu1[17]*Gu2[19] + Gu1[21]*Gu2[23] + Gu1[25]*Gu2[27] + Gu1[29]*Gu2[31] + Gu1[33]*Gu2[35] + Gu1[37]*Gu2[39]; acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4)] = + Gu1[2]*Gu2[0] + Gu1[6]*Gu2[4] + Gu1[10]*Gu2[8] + Gu1[14]*Gu2[12] + Gu1[18]*Gu2[16] + Gu1[22]*Gu2[20] + Gu1[26]*Gu2[24] + 
Gu1[30]*Gu2[28] + Gu1[34]*Gu2[32] + Gu1[38]*Gu2[36]; acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4 + 1)] = + Gu1[2]*Gu2[1] + Gu1[6]*Gu2[5] + Gu1[10]*Gu2[9] + Gu1[14]*Gu2[13] + Gu1[18]*Gu2[17] + Gu1[22]*Gu2[21] + Gu1[26]*Gu2[25] + Gu1[30]*Gu2[29] + Gu1[34]*Gu2[33] + Gu1[38]*Gu2[37]; acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4 + 2)] = + Gu1[2]*Gu2[2] + Gu1[6]*Gu2[6] + Gu1[10]*Gu2[10] + Gu1[14]*Gu2[14] + Gu1[18]*Gu2[18] + Gu1[22]*Gu2[22] + Gu1[26]*Gu2[26] + Gu1[30]*Gu2[30] + Gu1[34]*Gu2[34] + Gu1[38]*Gu2[38]; acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4 + 3)] = + Gu1[2]*Gu2[3] + Gu1[6]*Gu2[7] + Gu1[10]*Gu2[11] + Gu1[14]*Gu2[15] + Gu1[18]*Gu2[19] + Gu1[22]*Gu2[23] + Gu1[26]*Gu2[27] + Gu1[30]*Gu2[31] + Gu1[34]*Gu2[35] + Gu1[38]*Gu2[39]; acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4)] = + Gu1[3]*Gu2[0] + Gu1[7]*Gu2[4] + Gu1[11]*Gu2[8] + Gu1[15]*Gu2[12] + Gu1[19]*Gu2[16] + Gu1[23]*Gu2[20] + Gu1[27]*Gu2[24] + Gu1[31]*Gu2[28] + Gu1[35]*Gu2[32] + Gu1[39]*Gu2[36]; acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4 + 1)] = + Gu1[3]*Gu2[1] + Gu1[7]*Gu2[5] + Gu1[11]*Gu2[9] + Gu1[15]*Gu2[13] + Gu1[19]*Gu2[17] + Gu1[23]*Gu2[21] + Gu1[27]*Gu2[25] + Gu1[31]*Gu2[29] + Gu1[35]*Gu2[33] + Gu1[39]*Gu2[37]; acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4 + 2)] = + Gu1[3]*Gu2[2] + Gu1[7]*Gu2[6] + Gu1[11]*Gu2[10] + Gu1[15]*Gu2[14] + Gu1[19]*Gu2[18] + Gu1[23]*Gu2[22] + Gu1[27]*Gu2[26] + Gu1[31]*Gu2[30] + Gu1[35]*Gu2[34] + Gu1[39]*Gu2[38]; acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4 + 3)] = + Gu1[3]*Gu2[3] + Gu1[7]*Gu2[7] + Gu1[11]*Gu2[11] + Gu1[15]*Gu2[15] + Gu1[19]*Gu2[19] + Gu1[23]*Gu2[23] + Gu1[27]*Gu2[27] + Gu1[31]*Gu2[31] + Gu1[35]*Gu2[35] + Gu1[39]*Gu2[39]; } void acado_multBTW1_R1( real_t* const R11, real_t* const Gu1, real_t* const Gu2, int iRow ) { acadoWorkspace.H[iRow * 324] = + Gu1[0]*Gu2[0] + Gu1[4]*Gu2[4] + Gu1[8]*Gu2[8] + Gu1[12]*Gu2[12] + Gu1[16]*Gu2[16] + Gu1[20]*Gu2[20] + Gu1[24]*Gu2[24] + Gu1[28]*Gu2[28] + Gu1[32]*Gu2[32] + Gu1[36]*Gu2[36] + R11[0]; 
acadoWorkspace.H[iRow * 324 + 1] = + Gu1[0]*Gu2[1] + Gu1[4]*Gu2[5] + Gu1[8]*Gu2[9] + Gu1[12]*Gu2[13] + Gu1[16]*Gu2[17] + Gu1[20]*Gu2[21] + Gu1[24]*Gu2[25] + Gu1[28]*Gu2[29] + Gu1[32]*Gu2[33] + Gu1[36]*Gu2[37] + R11[1]; acadoWorkspace.H[iRow * 324 + 2] = + Gu1[0]*Gu2[2] + Gu1[4]*Gu2[6] + Gu1[8]*Gu2[10] + Gu1[12]*Gu2[14] + Gu1[16]*Gu2[18] + Gu1[20]*Gu2[22] + Gu1[24]*Gu2[26] + Gu1[28]*Gu2[30] + Gu1[32]*Gu2[34] + Gu1[36]*Gu2[38] + R11[2]; acadoWorkspace.H[iRow * 324 + 3] = + Gu1[0]*Gu2[3] + Gu1[4]*Gu2[7] + Gu1[8]*Gu2[11] + Gu1[12]*Gu2[15] + Gu1[16]*Gu2[19] + Gu1[20]*Gu2[23] + Gu1[24]*Gu2[27] + Gu1[28]*Gu2[31] + Gu1[32]*Gu2[35] + Gu1[36]*Gu2[39] + R11[3]; acadoWorkspace.H[iRow * 324 + 80] = + Gu1[1]*Gu2[0] + Gu1[5]*Gu2[4] + Gu1[9]*Gu2[8] + Gu1[13]*Gu2[12] + Gu1[17]*Gu2[16] + Gu1[21]*Gu2[20] + Gu1[25]*Gu2[24] + Gu1[29]*Gu2[28] + Gu1[33]*Gu2[32] + Gu1[37]*Gu2[36] + R11[4]; acadoWorkspace.H[iRow * 324 + 81] = + Gu1[1]*Gu2[1] + Gu1[5]*Gu2[5] + Gu1[9]*Gu2[9] + Gu1[13]*Gu2[13] + Gu1[17]*Gu2[17] + Gu1[21]*Gu2[21] + Gu1[25]*Gu2[25] + Gu1[29]*Gu2[29] + Gu1[33]*Gu2[33] + Gu1[37]*Gu2[37] + R11[5]; acadoWorkspace.H[iRow * 324 + 82] = + Gu1[1]*Gu2[2] + Gu1[5]*Gu2[6] + Gu1[9]*Gu2[10] + Gu1[13]*Gu2[14] + Gu1[17]*Gu2[18] + Gu1[21]*Gu2[22] + Gu1[25]*Gu2[26] + Gu1[29]*Gu2[30] + Gu1[33]*Gu2[34] + Gu1[37]*Gu2[38] + R11[6]; acadoWorkspace.H[iRow * 324 + 83] = + Gu1[1]*Gu2[3] + Gu1[5]*Gu2[7] + Gu1[9]*Gu2[11] + Gu1[13]*Gu2[15] + Gu1[17]*Gu2[19] + Gu1[21]*Gu2[23] + Gu1[25]*Gu2[27] + Gu1[29]*Gu2[31] + Gu1[33]*Gu2[35] + Gu1[37]*Gu2[39] + R11[7]; acadoWorkspace.H[iRow * 324 + 160] = + Gu1[2]*Gu2[0] + Gu1[6]*Gu2[4] + Gu1[10]*Gu2[8] + Gu1[14]*Gu2[12] + Gu1[18]*Gu2[16] + Gu1[22]*Gu2[20] + Gu1[26]*Gu2[24] + Gu1[30]*Gu2[28] + Gu1[34]*Gu2[32] + Gu1[38]*Gu2[36] + R11[8]; acadoWorkspace.H[iRow * 324 + 161] = + Gu1[2]*Gu2[1] + Gu1[6]*Gu2[5] + Gu1[10]*Gu2[9] + Gu1[14]*Gu2[13] + Gu1[18]*Gu2[17] + Gu1[22]*Gu2[21] + Gu1[26]*Gu2[25] + Gu1[30]*Gu2[29] + Gu1[34]*Gu2[33] + Gu1[38]*Gu2[37] + R11[9]; 
acadoWorkspace.H[iRow * 324 + 162] = + Gu1[2]*Gu2[2] + Gu1[6]*Gu2[6] + Gu1[10]*Gu2[10] + Gu1[14]*Gu2[14] + Gu1[18]*Gu2[18] + Gu1[22]*Gu2[22] + Gu1[26]*Gu2[26] + Gu1[30]*Gu2[30] + Gu1[34]*Gu2[34] + Gu1[38]*Gu2[38] + R11[10]; acadoWorkspace.H[iRow * 324 + 163] = + Gu1[2]*Gu2[3] + Gu1[6]*Gu2[7] + Gu1[10]*Gu2[11] + Gu1[14]*Gu2[15] + Gu1[18]*Gu2[19] + Gu1[22]*Gu2[23] + Gu1[26]*Gu2[27] + Gu1[30]*Gu2[31] + Gu1[34]*Gu2[35] + Gu1[38]*Gu2[39] + R11[11]; acadoWorkspace.H[iRow * 324 + 240] = + Gu1[3]*Gu2[0] + Gu1[7]*Gu2[4] + Gu1[11]*Gu2[8] + Gu1[15]*Gu2[12] + Gu1[19]*Gu2[16] + Gu1[23]*Gu2[20] + Gu1[27]*Gu2[24] + Gu1[31]*Gu2[28] + Gu1[35]*Gu2[32] + Gu1[39]*Gu2[36] + R11[12]; acadoWorkspace.H[iRow * 324 + 241] = + Gu1[3]*Gu2[1] + Gu1[7]*Gu2[5] + Gu1[11]*Gu2[9] + Gu1[15]*Gu2[13] + Gu1[19]*Gu2[17] + Gu1[23]*Gu2[21] + Gu1[27]*Gu2[25] + Gu1[31]*Gu2[29] + Gu1[35]*Gu2[33] + Gu1[39]*Gu2[37] + R11[13]; acadoWorkspace.H[iRow * 324 + 242] = + Gu1[3]*Gu2[2] + Gu1[7]*Gu2[6] + Gu1[11]*Gu2[10] + Gu1[15]*Gu2[14] + Gu1[19]*Gu2[18] + Gu1[23]*Gu2[22] + Gu1[27]*Gu2[26] + Gu1[31]*Gu2[30] + Gu1[35]*Gu2[34] + Gu1[39]*Gu2[38] + R11[14]; acadoWorkspace.H[iRow * 324 + 243] = + Gu1[3]*Gu2[3] + Gu1[7]*Gu2[7] + Gu1[11]*Gu2[11] + Gu1[15]*Gu2[15] + Gu1[19]*Gu2[19] + Gu1[23]*Gu2[23] + Gu1[27]*Gu2[27] + Gu1[31]*Gu2[31] + Gu1[35]*Gu2[35] + Gu1[39]*Gu2[39] + R11[15]; } void acado_multGxTGu( real_t* const Gx1, real_t* const Gu1, real_t* const Gu2 ) { Gu2[0] = + Gx1[0]*Gu1[0] + Gx1[10]*Gu1[4] + Gx1[20]*Gu1[8] + Gx1[30]*Gu1[12] + Gx1[40]*Gu1[16] + Gx1[50]*Gu1[20] + Gx1[60]*Gu1[24] + Gx1[70]*Gu1[28] + Gx1[80]*Gu1[32] + Gx1[90]*Gu1[36]; Gu2[1] = + Gx1[0]*Gu1[1] + Gx1[10]*Gu1[5] + Gx1[20]*Gu1[9] + Gx1[30]*Gu1[13] + Gx1[40]*Gu1[17] + Gx1[50]*Gu1[21] + Gx1[60]*Gu1[25] + Gx1[70]*Gu1[29] + Gx1[80]*Gu1[33] + Gx1[90]*Gu1[37]; Gu2[2] = + Gx1[0]*Gu1[2] + Gx1[10]*Gu1[6] + Gx1[20]*Gu1[10] + Gx1[30]*Gu1[14] + Gx1[40]*Gu1[18] + Gx1[50]*Gu1[22] + Gx1[60]*Gu1[26] + Gx1[70]*Gu1[30] + Gx1[80]*Gu1[34] + Gx1[90]*Gu1[38]; Gu2[3] = + 
Gx1[0]*Gu1[3] + Gx1[10]*Gu1[7] + Gx1[20]*Gu1[11] + Gx1[30]*Gu1[15] + Gx1[40]*Gu1[19] + Gx1[50]*Gu1[23] + Gx1[60]*Gu1[27] + Gx1[70]*Gu1[31] + Gx1[80]*Gu1[35] + Gx1[90]*Gu1[39]; Gu2[4] = + Gx1[1]*Gu1[0] + Gx1[11]*Gu1[4] + Gx1[21]*Gu1[8] + Gx1[31]*Gu1[12] + Gx1[41]*Gu1[16] + Gx1[51]*Gu1[20] + Gx1[61]*Gu1[24] + Gx1[71]*Gu1[28] + Gx1[81]*Gu1[32] + Gx1[91]*Gu1[36]; Gu2[5] = + Gx1[1]*Gu1[1] + Gx1[11]*Gu1[5] + Gx1[21]*Gu1[9] + Gx1[31]*Gu1[13] + Gx1[41]*Gu1[17] + Gx1[51]*Gu1[21] + Gx1[61]*Gu1[25] + Gx1[71]*Gu1[29] + Gx1[81]*Gu1[33] + Gx1[91]*Gu1[37]; Gu2[6] = + Gx1[1]*Gu1[2] + Gx1[11]*Gu1[6] + Gx1[21]*Gu1[10] + Gx1[31]*Gu1[14] + Gx1[41]*Gu1[18] + Gx1[51]*Gu1[22] + Gx1[61]*Gu1[26] + Gx1[71]*Gu1[30] + Gx1[81]*Gu1[34] + Gx1[91]*Gu1[38]; Gu2[7] = + Gx1[1]*Gu1[3] + Gx1[11]*Gu1[7] + Gx1[21]*Gu1[11] + Gx1[31]*Gu1[15] + Gx1[41]*Gu1[19] + Gx1[51]*Gu1[23] + Gx1[61]*Gu1[27] + Gx1[71]*Gu1[31] + Gx1[81]*Gu1[35] + Gx1[91]*Gu1[39]; Gu2[8] = + Gx1[2]*Gu1[0] + Gx1[12]*Gu1[4] + Gx1[22]*Gu1[8] + Gx1[32]*Gu1[12] + Gx1[42]*Gu1[16] + Gx1[52]*Gu1[20] + Gx1[62]*Gu1[24] + Gx1[72]*Gu1[28] + Gx1[82]*Gu1[32] + Gx1[92]*Gu1[36]; Gu2[9] = + Gx1[2]*Gu1[1] + Gx1[12]*Gu1[5] + Gx1[22]*Gu1[9] + Gx1[32]*Gu1[13] + Gx1[42]*Gu1[17] + Gx1[52]*Gu1[21] + Gx1[62]*Gu1[25] + Gx1[72]*Gu1[29] + Gx1[82]*Gu1[33] + Gx1[92]*Gu1[37]; Gu2[10] = + Gx1[2]*Gu1[2] + Gx1[12]*Gu1[6] + Gx1[22]*Gu1[10] + Gx1[32]*Gu1[14] + Gx1[42]*Gu1[18] + Gx1[52]*Gu1[22] + Gx1[62]*Gu1[26] + Gx1[72]*Gu1[30] + Gx1[82]*Gu1[34] + Gx1[92]*Gu1[38]; Gu2[11] = + Gx1[2]*Gu1[3] + Gx1[12]*Gu1[7] + Gx1[22]*Gu1[11] + Gx1[32]*Gu1[15] + Gx1[42]*Gu1[19] + Gx1[52]*Gu1[23] + Gx1[62]*Gu1[27] + Gx1[72]*Gu1[31] + Gx1[82]*Gu1[35] + Gx1[92]*Gu1[39]; Gu2[12] = + Gx1[3]*Gu1[0] + Gx1[13]*Gu1[4] + Gx1[23]*Gu1[8] + Gx1[33]*Gu1[12] + Gx1[43]*Gu1[16] + Gx1[53]*Gu1[20] + Gx1[63]*Gu1[24] + Gx1[73]*Gu1[28] + Gx1[83]*Gu1[32] + Gx1[93]*Gu1[36]; Gu2[13] = + Gx1[3]*Gu1[1] + Gx1[13]*Gu1[5] + Gx1[23]*Gu1[9] + Gx1[33]*Gu1[13] + Gx1[43]*Gu1[17] + Gx1[53]*Gu1[21] + Gx1[63]*Gu1[25] + 
Gx1[73]*Gu1[29] + Gx1[83]*Gu1[33] + Gx1[93]*Gu1[37]; Gu2[14] = + Gx1[3]*Gu1[2] + Gx1[13]*Gu1[6] + Gx1[23]*Gu1[10] + Gx1[33]*Gu1[14] + Gx1[43]*Gu1[18] + Gx1[53]*Gu1[22] + Gx1[63]*Gu1[26] + Gx1[73]*Gu1[30] + Gx1[83]*Gu1[34] + Gx1[93]*Gu1[38]; Gu2[15] = + Gx1[3]*Gu1[3] + Gx1[13]*Gu1[7] + Gx1[23]*Gu1[11] + Gx1[33]*Gu1[15] + Gx1[43]*Gu1[19] + Gx1[53]*Gu1[23] + Gx1[63]*Gu1[27] + Gx1[73]*Gu1[31] + Gx1[83]*Gu1[35] + Gx1[93]*Gu1[39]; Gu2[16] = + Gx1[4]*Gu1[0] + Gx1[14]*Gu1[4] + Gx1[24]*Gu1[8] + Gx1[34]*Gu1[12] + Gx1[44]*Gu1[16] + Gx1[54]*Gu1[20] + Gx1[64]*Gu1[24] + Gx1[74]*Gu1[28] + Gx1[84]*Gu1[32] + Gx1[94]*Gu1[36]; Gu2[17] = + Gx1[4]*Gu1[1] + Gx1[14]*Gu1[5] + Gx1[24]*Gu1[9] + Gx1[34]*Gu1[13] + Gx1[44]*Gu1[17] + Gx1[54]*Gu1[21] + Gx1[64]*Gu1[25] + Gx1[74]*Gu1[29] + Gx1[84]*Gu1[33] + Gx1[94]*Gu1[37]; Gu2[18] = + Gx1[4]*Gu1[2] + Gx1[14]*Gu1[6] + Gx1[24]*Gu1[10] + Gx1[34]*Gu1[14] + Gx1[44]*Gu1[18] + Gx1[54]*Gu1[22] + Gx1[64]*Gu1[26] + Gx1[74]*Gu1[30] + Gx1[84]*Gu1[34] + Gx1[94]*Gu1[38]; Gu2[19] = + Gx1[4]*Gu1[3] + Gx1[14]*Gu1[7] + Gx1[24]*Gu1[11] + Gx1[34]*Gu1[15] + Gx1[44]*Gu1[19] + Gx1[54]*Gu1[23] + Gx1[64]*Gu1[27] + Gx1[74]*Gu1[31] + Gx1[84]*Gu1[35] + Gx1[94]*Gu1[39]; Gu2[20] = + Gx1[5]*Gu1[0] + Gx1[15]*Gu1[4] + Gx1[25]*Gu1[8] + Gx1[35]*Gu1[12] + Gx1[45]*Gu1[16] + Gx1[55]*Gu1[20] + Gx1[65]*Gu1[24] + Gx1[75]*Gu1[28] + Gx1[85]*Gu1[32] + Gx1[95]*Gu1[36]; Gu2[21] = + Gx1[5]*Gu1[1] + Gx1[15]*Gu1[5] + Gx1[25]*Gu1[9] + Gx1[35]*Gu1[13] + Gx1[45]*Gu1[17] + Gx1[55]*Gu1[21] + Gx1[65]*Gu1[25] + Gx1[75]*Gu1[29] + Gx1[85]*Gu1[33] + Gx1[95]*Gu1[37]; Gu2[22] = + Gx1[5]*Gu1[2] + Gx1[15]*Gu1[6] + Gx1[25]*Gu1[10] + Gx1[35]*Gu1[14] + Gx1[45]*Gu1[18] + Gx1[55]*Gu1[22] + Gx1[65]*Gu1[26] + Gx1[75]*Gu1[30] + Gx1[85]*Gu1[34] + Gx1[95]*Gu1[38]; Gu2[23] = + Gx1[5]*Gu1[3] + Gx1[15]*Gu1[7] + Gx1[25]*Gu1[11] + Gx1[35]*Gu1[15] + Gx1[45]*Gu1[19] + Gx1[55]*Gu1[23] + Gx1[65]*Gu1[27] + Gx1[75]*Gu1[31] + Gx1[85]*Gu1[35] + Gx1[95]*Gu1[39]; Gu2[24] = + Gx1[6]*Gu1[0] + Gx1[16]*Gu1[4] + Gx1[26]*Gu1[8] + 
Gx1[36]*Gu1[12] + Gx1[46]*Gu1[16] + Gx1[56]*Gu1[20] + Gx1[66]*Gu1[24] + Gx1[76]*Gu1[28] + Gx1[86]*Gu1[32] + Gx1[96]*Gu1[36]; Gu2[25] = + Gx1[6]*Gu1[1] + Gx1[16]*Gu1[5] + Gx1[26]*Gu1[9] + Gx1[36]*Gu1[13] + Gx1[46]*Gu1[17] + Gx1[56]*Gu1[21] + Gx1[66]*Gu1[25] + Gx1[76]*Gu1[29] + Gx1[86]*Gu1[33] + Gx1[96]*Gu1[37]; Gu2[26] = + Gx1[6]*Gu1[2] + Gx1[16]*Gu1[6] + Gx1[26]*Gu1[10] + Gx1[36]*Gu1[14] + Gx1[46]*Gu1[18] + Gx1[56]*Gu1[22] + Gx1[66]*Gu1[26] + Gx1[76]*Gu1[30] + Gx1[86]*Gu1[34] + Gx1[96]*Gu1[38]; Gu2[27] = + Gx1[6]*Gu1[3] + Gx1[16]*Gu1[7] + Gx1[26]*Gu1[11] + Gx1[36]*Gu1[15] + Gx1[46]*Gu1[19] + Gx1[56]*Gu1[23] + Gx1[66]*Gu1[27] + Gx1[76]*Gu1[31] + Gx1[86]*Gu1[35] + Gx1[96]*Gu1[39]; Gu2[28] = + Gx1[7]*Gu1[0] + Gx1[17]*Gu1[4] + Gx1[27]*Gu1[8] + Gx1[37]*Gu1[12] + Gx1[47]*Gu1[16] + Gx1[57]*Gu1[20] + Gx1[67]*Gu1[24] + Gx1[77]*Gu1[28] + Gx1[87]*Gu1[32] + Gx1[97]*Gu1[36]; Gu2[29] = + Gx1[7]*Gu1[1] + Gx1[17]*Gu1[5] + Gx1[27]*Gu1[9] + Gx1[37]*Gu1[13] + Gx1[47]*Gu1[17] + Gx1[57]*Gu1[21] + Gx1[67]*Gu1[25] + Gx1[77]*Gu1[29] + Gx1[87]*Gu1[33] + Gx1[97]*Gu1[37]; Gu2[30] = + Gx1[7]*Gu1[2] + Gx1[17]*Gu1[6] + Gx1[27]*Gu1[10] + Gx1[37]*Gu1[14] + Gx1[47]*Gu1[18] + Gx1[57]*Gu1[22] + Gx1[67]*Gu1[26] + Gx1[77]*Gu1[30] + Gx1[87]*Gu1[34] + Gx1[97]*Gu1[38]; Gu2[31] = + Gx1[7]*Gu1[3] + Gx1[17]*Gu1[7] + Gx1[27]*Gu1[11] + Gx1[37]*Gu1[15] + Gx1[47]*Gu1[19] + Gx1[57]*Gu1[23] + Gx1[67]*Gu1[27] + Gx1[77]*Gu1[31] + Gx1[87]*Gu1[35] + Gx1[97]*Gu1[39]; Gu2[32] = + Gx1[8]*Gu1[0] + Gx1[18]*Gu1[4] + Gx1[28]*Gu1[8] + Gx1[38]*Gu1[12] + Gx1[48]*Gu1[16] + Gx1[58]*Gu1[20] + Gx1[68]*Gu1[24] + Gx1[78]*Gu1[28] + Gx1[88]*Gu1[32] + Gx1[98]*Gu1[36]; Gu2[33] = + Gx1[8]*Gu1[1] + Gx1[18]*Gu1[5] + Gx1[28]*Gu1[9] + Gx1[38]*Gu1[13] + Gx1[48]*Gu1[17] + Gx1[58]*Gu1[21] + Gx1[68]*Gu1[25] + Gx1[78]*Gu1[29] + Gx1[88]*Gu1[33] + Gx1[98]*Gu1[37]; Gu2[34] = + Gx1[8]*Gu1[2] + Gx1[18]*Gu1[6] + Gx1[28]*Gu1[10] + Gx1[38]*Gu1[14] + Gx1[48]*Gu1[18] + Gx1[58]*Gu1[22] + Gx1[68]*Gu1[26] + Gx1[78]*Gu1[30] + Gx1[88]*Gu1[34] + Gx1[98]*Gu1[38]; 
Gu2[35] = + Gx1[8]*Gu1[3] + Gx1[18]*Gu1[7] + Gx1[28]*Gu1[11] + Gx1[38]*Gu1[15] + Gx1[48]*Gu1[19] + Gx1[58]*Gu1[23] + Gx1[68]*Gu1[27] + Gx1[78]*Gu1[31] + Gx1[88]*Gu1[35] + Gx1[98]*Gu1[39]; Gu2[36] = + Gx1[9]*Gu1[0] + Gx1[19]*Gu1[4] + Gx1[29]*Gu1[8] + Gx1[39]*Gu1[12] + Gx1[49]*Gu1[16] + Gx1[59]*Gu1[20] + Gx1[69]*Gu1[24] + Gx1[79]*Gu1[28] + Gx1[89]*Gu1[32] + Gx1[99]*Gu1[36]; Gu2[37] = + Gx1[9]*Gu1[1] + Gx1[19]*Gu1[5] + Gx1[29]*Gu1[9] + Gx1[39]*Gu1[13] + Gx1[49]*Gu1[17] + Gx1[59]*Gu1[21] + Gx1[69]*Gu1[25] + Gx1[79]*Gu1[29] + Gx1[89]*Gu1[33] + Gx1[99]*Gu1[37]; Gu2[38] = + Gx1[9]*Gu1[2] + Gx1[19]*Gu1[6] + Gx1[29]*Gu1[10] + Gx1[39]*Gu1[14] + Gx1[49]*Gu1[18] + Gx1[59]*Gu1[22] + Gx1[69]*Gu1[26] + Gx1[79]*Gu1[30] + Gx1[89]*Gu1[34] + Gx1[99]*Gu1[38]; Gu2[39] = + Gx1[9]*Gu1[3] + Gx1[19]*Gu1[7] + Gx1[29]*Gu1[11] + Gx1[39]*Gu1[15] + Gx1[49]*Gu1[19] + Gx1[59]*Gu1[23] + Gx1[69]*Gu1[27] + Gx1[79]*Gu1[31] + Gx1[89]*Gu1[35] + Gx1[99]*Gu1[39]; } void acado_multQEW2( real_t* const Q11, real_t* const Gu1, real_t* const Gu2, real_t* const Gu3 ) { Gu3[0] = + Q11[0]*Gu1[0] + Q11[1]*Gu1[4] + Q11[2]*Gu1[8] + Q11[3]*Gu1[12] + Q11[4]*Gu1[16] + Q11[5]*Gu1[20] + Q11[6]*Gu1[24] + Q11[7]*Gu1[28] + Q11[8]*Gu1[32] + Q11[9]*Gu1[36] + Gu2[0]; Gu3[1] = + Q11[0]*Gu1[1] + Q11[1]*Gu1[5] + Q11[2]*Gu1[9] + Q11[3]*Gu1[13] + Q11[4]*Gu1[17] + Q11[5]*Gu1[21] + Q11[6]*Gu1[25] + Q11[7]*Gu1[29] + Q11[8]*Gu1[33] + Q11[9]*Gu1[37] + Gu2[1]; Gu3[2] = + Q11[0]*Gu1[2] + Q11[1]*Gu1[6] + Q11[2]*Gu1[10] + Q11[3]*Gu1[14] + Q11[4]*Gu1[18] + Q11[5]*Gu1[22] + Q11[6]*Gu1[26] + Q11[7]*Gu1[30] + Q11[8]*Gu1[34] + Q11[9]*Gu1[38] + Gu2[2]; Gu3[3] = + Q11[0]*Gu1[3] + Q11[1]*Gu1[7] + Q11[2]*Gu1[11] + Q11[3]*Gu1[15] + Q11[4]*Gu1[19] + Q11[5]*Gu1[23] + Q11[6]*Gu1[27] + Q11[7]*Gu1[31] + Q11[8]*Gu1[35] + Q11[9]*Gu1[39] + Gu2[3]; Gu3[4] = + Q11[10]*Gu1[0] + Q11[11]*Gu1[4] + Q11[12]*Gu1[8] + Q11[13]*Gu1[12] + Q11[14]*Gu1[16] + Q11[15]*Gu1[20] + Q11[16]*Gu1[24] + Q11[17]*Gu1[28] + Q11[18]*Gu1[32] + Q11[19]*Gu1[36] + Gu2[4]; Gu3[5] = + 
Q11[10]*Gu1[1] + Q11[11]*Gu1[5] + Q11[12]*Gu1[9] + Q11[13]*Gu1[13] + Q11[14]*Gu1[17] + Q11[15]*Gu1[21] + Q11[16]*Gu1[25] + Q11[17]*Gu1[29] + Q11[18]*Gu1[33] + Q11[19]*Gu1[37] + Gu2[5]; Gu3[6] = + Q11[10]*Gu1[2] + Q11[11]*Gu1[6] + Q11[12]*Gu1[10] + Q11[13]*Gu1[14] + Q11[14]*Gu1[18] + Q11[15]*Gu1[22] + Q11[16]*Gu1[26] + Q11[17]*Gu1[30] + Q11[18]*Gu1[34] + Q11[19]*Gu1[38] + Gu2[6]; Gu3[7] = + Q11[10]*Gu1[3] + Q11[11]*Gu1[7] + Q11[12]*Gu1[11] + Q11[13]*Gu1[15] + Q11[14]*Gu1[19] + Q11[15]*Gu1[23] + Q11[16]*Gu1[27] + Q11[17]*Gu1[31] + Q11[18]*Gu1[35] + Q11[19]*Gu1[39] + Gu2[7]; Gu3[8] = + Q11[20]*Gu1[0] + Q11[21]*Gu1[4] + Q11[22]*Gu1[8] + Q11[23]*Gu1[12] + Q11[24]*Gu1[16] + Q11[25]*Gu1[20] + Q11[26]*Gu1[24] + Q11[27]*Gu1[28] + Q11[28]*Gu1[32] + Q11[29]*Gu1[36] + Gu2[8]; Gu3[9] = + Q11[20]*Gu1[1] + Q11[21]*Gu1[5] + Q11[22]*Gu1[9] + Q11[23]*Gu1[13] + Q11[24]*Gu1[17] + Q11[25]*Gu1[21] + Q11[26]*Gu1[25] + Q11[27]*Gu1[29] + Q11[28]*Gu1[33] + Q11[29]*Gu1[37] + Gu2[9]; Gu3[10] = + Q11[20]*Gu1[2] + Q11[21]*Gu1[6] + Q11[22]*Gu1[10] + Q11[23]*Gu1[14] + Q11[24]*Gu1[18] + Q11[25]*Gu1[22] + Q11[26]*Gu1[26] + Q11[27]*Gu1[30] + Q11[28]*Gu1[34] + Q11[29]*Gu1[38] + Gu2[10]; Gu3[11] = + Q11[20]*Gu1[3] + Q11[21]*Gu1[7] + Q11[22]*Gu1[11] + Q11[23]*Gu1[15] + Q11[24]*Gu1[19] + Q11[25]*Gu1[23] + Q11[26]*Gu1[27] + Q11[27]*Gu1[31] + Q11[28]*Gu1[35] + Q11[29]*Gu1[39] + Gu2[11]; Gu3[12] = + Q11[30]*Gu1[0] + Q11[31]*Gu1[4] + Q11[32]*Gu1[8] + Q11[33]*Gu1[12] + Q11[34]*Gu1[16] + Q11[35]*Gu1[20] + Q11[36]*Gu1[24] + Q11[37]*Gu1[28] + Q11[38]*Gu1[32] + Q11[39]*Gu1[36] + Gu2[12]; Gu3[13] = + Q11[30]*Gu1[1] + Q11[31]*Gu1[5] + Q11[32]*Gu1[9] + Q11[33]*Gu1[13] + Q11[34]*Gu1[17] + Q11[35]*Gu1[21] + Q11[36]*Gu1[25] + Q11[37]*Gu1[29] + Q11[38]*Gu1[33] + Q11[39]*Gu1[37] + Gu2[13]; Gu3[14] = + Q11[30]*Gu1[2] + Q11[31]*Gu1[6] + Q11[32]*Gu1[10] + Q11[33]*Gu1[14] + Q11[34]*Gu1[18] + Q11[35]*Gu1[22] + Q11[36]*Gu1[26] + Q11[37]*Gu1[30] + Q11[38]*Gu1[34] + Q11[39]*Gu1[38] + Gu2[14]; Gu3[15] = + Q11[30]*Gu1[3] + 
Q11[31]*Gu1[7] + Q11[32]*Gu1[11] + Q11[33]*Gu1[15] + Q11[34]*Gu1[19] + Q11[35]*Gu1[23] + Q11[36]*Gu1[27] + Q11[37]*Gu1[31] + Q11[38]*Gu1[35] + Q11[39]*Gu1[39] + Gu2[15]; Gu3[16] = + Q11[40]*Gu1[0] + Q11[41]*Gu1[4] + Q11[42]*Gu1[8] + Q11[43]*Gu1[12] + Q11[44]*Gu1[16] + Q11[45]*Gu1[20] + Q11[46]*Gu1[24] + Q11[47]*Gu1[28] + Q11[48]*Gu1[32] + Q11[49]*Gu1[36] + Gu2[16]; Gu3[17] = + Q11[40]*Gu1[1] + Q11[41]*Gu1[5] + Q11[42]*Gu1[9] + Q11[43]*Gu1[13] + Q11[44]*Gu1[17] + Q11[45]*Gu1[21] + Q11[46]*Gu1[25] + Q11[47]*Gu1[29] + Q11[48]*Gu1[33] + Q11[49]*Gu1[37] + Gu2[17]; Gu3[18] = + Q11[40]*Gu1[2] + Q11[41]*Gu1[6] + Q11[42]*Gu1[10] + Q11[43]*Gu1[14] + Q11[44]*Gu1[18] + Q11[45]*Gu1[22] + Q11[46]*Gu1[26] + Q11[47]*Gu1[30] + Q11[48]*Gu1[34] + Q11[49]*Gu1[38] + Gu2[18]; Gu3[19] = + Q11[40]*Gu1[3] + Q11[41]*Gu1[7] + Q11[42]*Gu1[11] + Q11[43]*Gu1[15] + Q11[44]*Gu1[19] + Q11[45]*Gu1[23] + Q11[46]*Gu1[27] + Q11[47]*Gu1[31] + Q11[48]*Gu1[35] + Q11[49]*Gu1[39] + Gu2[19]; Gu3[20] = + Q11[50]*Gu1[0] + Q11[51]*Gu1[4] + Q11[52]*Gu1[8] + Q11[53]*Gu1[12] + Q11[54]*Gu1[16] + Q11[55]*Gu1[20] + Q11[56]*Gu1[24] + Q11[57]*Gu1[28] + Q11[58]*Gu1[32] + Q11[59]*Gu1[36] + Gu2[20]; Gu3[21] = + Q11[50]*Gu1[1] + Q11[51]*Gu1[5] + Q11[52]*Gu1[9] + Q11[53]*Gu1[13] + Q11[54]*Gu1[17] + Q11[55]*Gu1[21] + Q11[56]*Gu1[25] + Q11[57]*Gu1[29] + Q11[58]*Gu1[33] + Q11[59]*Gu1[37] + Gu2[21]; Gu3[22] = + Q11[50]*Gu1[2] + Q11[51]*Gu1[6] + Q11[52]*Gu1[10] + Q11[53]*Gu1[14] + Q11[54]*Gu1[18] + Q11[55]*Gu1[22] + Q11[56]*Gu1[26] + Q11[57]*Gu1[30] + Q11[58]*Gu1[34] + Q11[59]*Gu1[38] + Gu2[22]; Gu3[23] = + Q11[50]*Gu1[3] + Q11[51]*Gu1[7] + Q11[52]*Gu1[11] + Q11[53]*Gu1[15] + Q11[54]*Gu1[19] + Q11[55]*Gu1[23] + Q11[56]*Gu1[27] + Q11[57]*Gu1[31] + Q11[58]*Gu1[35] + Q11[59]*Gu1[39] + Gu2[23]; Gu3[24] = + Q11[60]*Gu1[0] + Q11[61]*Gu1[4] + Q11[62]*Gu1[8] + Q11[63]*Gu1[12] + Q11[64]*Gu1[16] + Q11[65]*Gu1[20] + Q11[66]*Gu1[24] + Q11[67]*Gu1[28] + Q11[68]*Gu1[32] + Q11[69]*Gu1[36] + Gu2[24]; Gu3[25] = + Q11[60]*Gu1[1] + Q11[61]*Gu1[5] 
+ Q11[62]*Gu1[9] + Q11[63]*Gu1[13] + Q11[64]*Gu1[17] + Q11[65]*Gu1[21] + Q11[66]*Gu1[25] + Q11[67]*Gu1[29] + Q11[68]*Gu1[33] + Q11[69]*Gu1[37] + Gu2[25]; Gu3[26] = + Q11[60]*Gu1[2] + Q11[61]*Gu1[6] + Q11[62]*Gu1[10] + Q11[63]*Gu1[14] + Q11[64]*Gu1[18] + Q11[65]*Gu1[22] + Q11[66]*Gu1[26] + Q11[67]*Gu1[30] + Q11[68]*Gu1[34] + Q11[69]*Gu1[38] + Gu2[26]; Gu3[27] = + Q11[60]*Gu1[3] + Q11[61]*Gu1[7] + Q11[62]*Gu1[11] + Q11[63]*Gu1[15] + Q11[64]*Gu1[19] + Q11[65]*Gu1[23] + Q11[66]*Gu1[27] + Q11[67]*Gu1[31] + Q11[68]*Gu1[35] + Q11[69]*Gu1[39] + Gu2[27]; Gu3[28] = + Q11[70]*Gu1[0] + Q11[71]*Gu1[4] + Q11[72]*Gu1[8] + Q11[73]*Gu1[12] + Q11[74]*Gu1[16] + Q11[75]*Gu1[20] + Q11[76]*Gu1[24] + Q11[77]*Gu1[28] + Q11[78]*Gu1[32] + Q11[79]*Gu1[36] + Gu2[28]; Gu3[29] = + Q11[70]*Gu1[1] + Q11[71]*Gu1[5] + Q11[72]*Gu1[9] + Q11[73]*Gu1[13] + Q11[74]*Gu1[17] + Q11[75]*Gu1[21] + Q11[76]*Gu1[25] + Q11[77]*Gu1[29] + Q11[78]*Gu1[33] + Q11[79]*Gu1[37] + Gu2[29]; Gu3[30] = + Q11[70]*Gu1[2] + Q11[71]*Gu1[6] + Q11[72]*Gu1[10] + Q11[73]*Gu1[14] + Q11[74]*Gu1[18] + Q11[75]*Gu1[22] + Q11[76]*Gu1[26] + Q11[77]*Gu1[30] + Q11[78]*Gu1[34] + Q11[79]*Gu1[38] + Gu2[30]; Gu3[31] = + Q11[70]*Gu1[3] + Q11[71]*Gu1[7] + Q11[72]*Gu1[11] + Q11[73]*Gu1[15] + Q11[74]*Gu1[19] + Q11[75]*Gu1[23] + Q11[76]*Gu1[27] + Q11[77]*Gu1[31] + Q11[78]*Gu1[35] + Q11[79]*Gu1[39] + Gu2[31]; Gu3[32] = + Q11[80]*Gu1[0] + Q11[81]*Gu1[4] + Q11[82]*Gu1[8] + Q11[83]*Gu1[12] + Q11[84]*Gu1[16] + Q11[85]*Gu1[20] + Q11[86]*Gu1[24] + Q11[87]*Gu1[28] + Q11[88]*Gu1[32] + Q11[89]*Gu1[36] + Gu2[32]; Gu3[33] = + Q11[80]*Gu1[1] + Q11[81]*Gu1[5] + Q11[82]*Gu1[9] + Q11[83]*Gu1[13] + Q11[84]*Gu1[17] + Q11[85]*Gu1[21] + Q11[86]*Gu1[25] + Q11[87]*Gu1[29] + Q11[88]*Gu1[33] + Q11[89]*Gu1[37] + Gu2[33]; Gu3[34] = + Q11[80]*Gu1[2] + Q11[81]*Gu1[6] + Q11[82]*Gu1[10] + Q11[83]*Gu1[14] + Q11[84]*Gu1[18] + Q11[85]*Gu1[22] + Q11[86]*Gu1[26] + Q11[87]*Gu1[30] + Q11[88]*Gu1[34] + Q11[89]*Gu1[38] + Gu2[34]; Gu3[35] = + Q11[80]*Gu1[3] + Q11[81]*Gu1[7] + 
Q11[82]*Gu1[11] + Q11[83]*Gu1[15] + Q11[84]*Gu1[19] + Q11[85]*Gu1[23] + Q11[86]*Gu1[27] + Q11[87]*Gu1[31] + Q11[88]*Gu1[35] + Q11[89]*Gu1[39] + Gu2[35]; Gu3[36] = + Q11[90]*Gu1[0] + Q11[91]*Gu1[4] + Q11[92]*Gu1[8] + Q11[93]*Gu1[12] + Q11[94]*Gu1[16] + Q11[95]*Gu1[20] + Q11[96]*Gu1[24] + Q11[97]*Gu1[28] + Q11[98]*Gu1[32] + Q11[99]*Gu1[36] + Gu2[36]; Gu3[37] = + Q11[90]*Gu1[1] + Q11[91]*Gu1[5] + Q11[92]*Gu1[9] + Q11[93]*Gu1[13] + Q11[94]*Gu1[17] + Q11[95]*Gu1[21] + Q11[96]*Gu1[25] + Q11[97]*Gu1[29] + Q11[98]*Gu1[33] + Q11[99]*Gu1[37] + Gu2[37]; Gu3[38] = + Q11[90]*Gu1[2] + Q11[91]*Gu1[6] + Q11[92]*Gu1[10] + Q11[93]*Gu1[14] + Q11[94]*Gu1[18] + Q11[95]*Gu1[22] + Q11[96]*Gu1[26] + Q11[97]*Gu1[30] + Q11[98]*Gu1[34] + Q11[99]*Gu1[38] + Gu2[38]; Gu3[39] = + Q11[90]*Gu1[3] + Q11[91]*Gu1[7] + Q11[92]*Gu1[11] + Q11[93]*Gu1[15] + Q11[94]*Gu1[19] + Q11[95]*Gu1[23] + Q11[96]*Gu1[27] + Q11[97]*Gu1[31] + Q11[98]*Gu1[35] + Q11[99]*Gu1[39] + Gu2[39]; } void acado_macATw1QDy( real_t* const Gx1, real_t* const w11, real_t* const w12, real_t* const w13 ) { w13[0] = + Gx1[0]*w11[0] + Gx1[10]*w11[1] + Gx1[20]*w11[2] + Gx1[30]*w11[3] + Gx1[40]*w11[4] + Gx1[50]*w11[5] + Gx1[60]*w11[6] + Gx1[70]*w11[7] + Gx1[80]*w11[8] + Gx1[90]*w11[9] + w12[0]; w13[1] = + Gx1[1]*w11[0] + Gx1[11]*w11[1] + Gx1[21]*w11[2] + Gx1[31]*w11[3] + Gx1[41]*w11[4] + Gx1[51]*w11[5] + Gx1[61]*w11[6] + Gx1[71]*w11[7] + Gx1[81]*w11[8] + Gx1[91]*w11[9] + w12[1]; w13[2] = + Gx1[2]*w11[0] + Gx1[12]*w11[1] + Gx1[22]*w11[2] + Gx1[32]*w11[3] + Gx1[42]*w11[4] + Gx1[52]*w11[5] + Gx1[62]*w11[6] + Gx1[72]*w11[7] + Gx1[82]*w11[8] + Gx1[92]*w11[9] + w12[2]; w13[3] = + Gx1[3]*w11[0] + Gx1[13]*w11[1] + Gx1[23]*w11[2] + Gx1[33]*w11[3] + Gx1[43]*w11[4] + Gx1[53]*w11[5] + Gx1[63]*w11[6] + Gx1[73]*w11[7] + Gx1[83]*w11[8] + Gx1[93]*w11[9] + w12[3]; w13[4] = + Gx1[4]*w11[0] + Gx1[14]*w11[1] + Gx1[24]*w11[2] + Gx1[34]*w11[3] + Gx1[44]*w11[4] + Gx1[54]*w11[5] + Gx1[64]*w11[6] + Gx1[74]*w11[7] + Gx1[84]*w11[8] + Gx1[94]*w11[9] + w12[4]; w13[5] = 
+ Gx1[5]*w11[0] + Gx1[15]*w11[1] + Gx1[25]*w11[2] + Gx1[35]*w11[3] + Gx1[45]*w11[4] + Gx1[55]*w11[5] + Gx1[65]*w11[6] + Gx1[75]*w11[7] + Gx1[85]*w11[8] + Gx1[95]*w11[9] + w12[5]; w13[6] = + Gx1[6]*w11[0] + Gx1[16]*w11[1] + Gx1[26]*w11[2] + Gx1[36]*w11[3] + Gx1[46]*w11[4] + Gx1[56]*w11[5] + Gx1[66]*w11[6] + Gx1[76]*w11[7] + Gx1[86]*w11[8] + Gx1[96]*w11[9] + w12[6]; w13[7] = + Gx1[7]*w11[0] + Gx1[17]*w11[1] + Gx1[27]*w11[2] + Gx1[37]*w11[3] + Gx1[47]*w11[4] + Gx1[57]*w11[5] + Gx1[67]*w11[6] + Gx1[77]*w11[7] + Gx1[87]*w11[8] + Gx1[97]*w11[9] + w12[7]; w13[8] = + Gx1[8]*w11[0] + Gx1[18]*w11[1] + Gx1[28]*w11[2] + Gx1[38]*w11[3] + Gx1[48]*w11[4] + Gx1[58]*w11[5] + Gx1[68]*w11[6] + Gx1[78]*w11[7] + Gx1[88]*w11[8] + Gx1[98]*w11[9] + w12[8]; w13[9] = + Gx1[9]*w11[0] + Gx1[19]*w11[1] + Gx1[29]*w11[2] + Gx1[39]*w11[3] + Gx1[49]*w11[4] + Gx1[59]*w11[5] + Gx1[69]*w11[6] + Gx1[79]*w11[7] + Gx1[89]*w11[8] + Gx1[99]*w11[9] + w12[9]; } void acado_macBTw1( real_t* const Gu1, real_t* const w11, real_t* const U1 ) { U1[0] += + Gu1[0]*w11[0] + Gu1[4]*w11[1] + Gu1[8]*w11[2] + Gu1[12]*w11[3] + Gu1[16]*w11[4] + Gu1[20]*w11[5] + Gu1[24]*w11[6] + Gu1[28]*w11[7] + Gu1[32]*w11[8] + Gu1[36]*w11[9]; U1[1] += + Gu1[1]*w11[0] + Gu1[5]*w11[1] + Gu1[9]*w11[2] + Gu1[13]*w11[3] + Gu1[17]*w11[4] + Gu1[21]*w11[5] + Gu1[25]*w11[6] + Gu1[29]*w11[7] + Gu1[33]*w11[8] + Gu1[37]*w11[9]; U1[2] += + Gu1[2]*w11[0] + Gu1[6]*w11[1] + Gu1[10]*w11[2] + Gu1[14]*w11[3] + Gu1[18]*w11[4] + Gu1[22]*w11[5] + Gu1[26]*w11[6] + Gu1[30]*w11[7] + Gu1[34]*w11[8] + Gu1[38]*w11[9]; U1[3] += + Gu1[3]*w11[0] + Gu1[7]*w11[1] + Gu1[11]*w11[2] + Gu1[15]*w11[3] + Gu1[19]*w11[4] + Gu1[23]*w11[5] + Gu1[27]*w11[6] + Gu1[31]*w11[7] + Gu1[35]*w11[8] + Gu1[39]*w11[9]; } void acado_macQSbarW2( real_t* const Q11, real_t* const w11, real_t* const w12, real_t* const w13 ) { w13[0] = + Q11[0]*w11[0] + Q11[1]*w11[1] + Q11[2]*w11[2] + Q11[3]*w11[3] + Q11[4]*w11[4] + Q11[5]*w11[5] + Q11[6]*w11[6] + Q11[7]*w11[7] + Q11[8]*w11[8] + Q11[9]*w11[9] + 
w12[0]; w13[1] = + Q11[10]*w11[0] + Q11[11]*w11[1] + Q11[12]*w11[2] + Q11[13]*w11[3] + Q11[14]*w11[4] + Q11[15]*w11[5] + Q11[16]*w11[6] + Q11[17]*w11[7] + Q11[18]*w11[8] + Q11[19]*w11[9] + w12[1]; w13[2] = + Q11[20]*w11[0] + Q11[21]*w11[1] + Q11[22]*w11[2] + Q11[23]*w11[3] + Q11[24]*w11[4] + Q11[25]*w11[5] + Q11[26]*w11[6] + Q11[27]*w11[7] + Q11[28]*w11[8] + Q11[29]*w11[9] + w12[2]; w13[3] = + Q11[30]*w11[0] + Q11[31]*w11[1] + Q11[32]*w11[2] + Q11[33]*w11[3] + Q11[34]*w11[4] + Q11[35]*w11[5] + Q11[36]*w11[6] + Q11[37]*w11[7] + Q11[38]*w11[8] + Q11[39]*w11[9] + w12[3]; w13[4] = + Q11[40]*w11[0] + Q11[41]*w11[1] + Q11[42]*w11[2] + Q11[43]*w11[3] + Q11[44]*w11[4] + Q11[45]*w11[5] + Q11[46]*w11[6] + Q11[47]*w11[7] + Q11[48]*w11[8] + Q11[49]*w11[9] + w12[4]; w13[5] = + Q11[50]*w11[0] + Q11[51]*w11[1] + Q11[52]*w11[2] + Q11[53]*w11[3] + Q11[54]*w11[4] + Q11[55]*w11[5] + Q11[56]*w11[6] + Q11[57]*w11[7] + Q11[58]*w11[8] + Q11[59]*w11[9] + w12[5]; w13[6] = + Q11[60]*w11[0] + Q11[61]*w11[1] + Q11[62]*w11[2] + Q11[63]*w11[3] + Q11[64]*w11[4] + Q11[65]*w11[5] + Q11[66]*w11[6] + Q11[67]*w11[7] + Q11[68]*w11[8] + Q11[69]*w11[9] + w12[6]; w13[7] = + Q11[70]*w11[0] + Q11[71]*w11[1] + Q11[72]*w11[2] + Q11[73]*w11[3] + Q11[74]*w11[4] + Q11[75]*w11[5] + Q11[76]*w11[6] + Q11[77]*w11[7] + Q11[78]*w11[8] + Q11[79]*w11[9] + w12[7]; w13[8] = + Q11[80]*w11[0] + Q11[81]*w11[1] + Q11[82]*w11[2] + Q11[83]*w11[3] + Q11[84]*w11[4] + Q11[85]*w11[5] + Q11[86]*w11[6] + Q11[87]*w11[7] + Q11[88]*w11[8] + Q11[89]*w11[9] + w12[8]; w13[9] = + Q11[90]*w11[0] + Q11[91]*w11[1] + Q11[92]*w11[2] + Q11[93]*w11[3] + Q11[94]*w11[4] + Q11[95]*w11[5] + Q11[96]*w11[6] + Q11[97]*w11[7] + Q11[98]*w11[8] + Q11[99]*w11[9] + w12[9]; } void acado_macASbar( real_t* const Gx1, real_t* const w11, real_t* const w12 ) { w12[0] += + Gx1[0]*w11[0] + Gx1[1]*w11[1] + Gx1[2]*w11[2] + Gx1[3]*w11[3] + Gx1[4]*w11[4] + Gx1[5]*w11[5] + Gx1[6]*w11[6] + Gx1[7]*w11[7] + Gx1[8]*w11[8] + Gx1[9]*w11[9]; w12[1] += + Gx1[10]*w11[0] + 
Gx1[11]*w11[1] + Gx1[12]*w11[2] + Gx1[13]*w11[3] + Gx1[14]*w11[4] + Gx1[15]*w11[5] + Gx1[16]*w11[6] + Gx1[17]*w11[7] + Gx1[18]*w11[8] + Gx1[19]*w11[9]; w12[2] += + Gx1[20]*w11[0] + Gx1[21]*w11[1] + Gx1[22]*w11[2] + Gx1[23]*w11[3] + Gx1[24]*w11[4] + Gx1[25]*w11[5] + Gx1[26]*w11[6] + Gx1[27]*w11[7] + Gx1[28]*w11[8] + Gx1[29]*w11[9]; w12[3] += + Gx1[30]*w11[0] + Gx1[31]*w11[1] + Gx1[32]*w11[2] + Gx1[33]*w11[3] + Gx1[34]*w11[4] + Gx1[35]*w11[5] + Gx1[36]*w11[6] + Gx1[37]*w11[7] + Gx1[38]*w11[8] + Gx1[39]*w11[9]; w12[4] += + Gx1[40]*w11[0] + Gx1[41]*w11[1] + Gx1[42]*w11[2] + Gx1[43]*w11[3] + Gx1[44]*w11[4] + Gx1[45]*w11[5] + Gx1[46]*w11[6] + Gx1[47]*w11[7] + Gx1[48]*w11[8] + Gx1[49]*w11[9]; w12[5] += + Gx1[50]*w11[0] + Gx1[51]*w11[1] + Gx1[52]*w11[2] + Gx1[53]*w11[3] + Gx1[54]*w11[4] + Gx1[55]*w11[5] + Gx1[56]*w11[6] + Gx1[57]*w11[7] + Gx1[58]*w11[8] + Gx1[59]*w11[9]; w12[6] += + Gx1[60]*w11[0] + Gx1[61]*w11[1] + Gx1[62]*w11[2] + Gx1[63]*w11[3] + Gx1[64]*w11[4] + Gx1[65]*w11[5] + Gx1[66]*w11[6] + Gx1[67]*w11[7] + Gx1[68]*w11[8] + Gx1[69]*w11[9]; w12[7] += + Gx1[70]*w11[0] + Gx1[71]*w11[1] + Gx1[72]*w11[2] + Gx1[73]*w11[3] + Gx1[74]*w11[4] + Gx1[75]*w11[5] + Gx1[76]*w11[6] + Gx1[77]*w11[7] + Gx1[78]*w11[8] + Gx1[79]*w11[9]; w12[8] += + Gx1[80]*w11[0] + Gx1[81]*w11[1] + Gx1[82]*w11[2] + Gx1[83]*w11[3] + Gx1[84]*w11[4] + Gx1[85]*w11[5] + Gx1[86]*w11[6] + Gx1[87]*w11[7] + Gx1[88]*w11[8] + Gx1[89]*w11[9]; w12[9] += + Gx1[90]*w11[0] + Gx1[91]*w11[1] + Gx1[92]*w11[2] + Gx1[93]*w11[3] + Gx1[94]*w11[4] + Gx1[95]*w11[5] + Gx1[96]*w11[6] + Gx1[97]*w11[7] + Gx1[98]*w11[8] + Gx1[99]*w11[9]; } void acado_expansionStep( real_t* const Gx1, real_t* const Gu1, real_t* const U1, real_t* const w11, real_t* const w12 ) { w12[0] += + Gx1[0]*w11[0] + Gx1[1]*w11[1] + Gx1[2]*w11[2] + Gx1[3]*w11[3] + Gx1[4]*w11[4] + Gx1[5]*w11[5] + Gx1[6]*w11[6] + Gx1[7]*w11[7] + Gx1[8]*w11[8] + Gx1[9]*w11[9]; w12[1] += + Gx1[10]*w11[0] + Gx1[11]*w11[1] + Gx1[12]*w11[2] + Gx1[13]*w11[3] + Gx1[14]*w11[4] + 
Gx1[15]*w11[5] + Gx1[16]*w11[6] + Gx1[17]*w11[7] + Gx1[18]*w11[8] + Gx1[19]*w11[9]; w12[2] += + Gx1[20]*w11[0] + Gx1[21]*w11[1] + Gx1[22]*w11[2] + Gx1[23]*w11[3] + Gx1[24]*w11[4] + Gx1[25]*w11[5] + Gx1[26]*w11[6] + Gx1[27]*w11[7] + Gx1[28]*w11[8] + Gx1[29]*w11[9]; w12[3] += + Gx1[30]*w11[0] + Gx1[31]*w11[1] + Gx1[32]*w11[2] + Gx1[33]*w11[3] + Gx1[34]*w11[4] + Gx1[35]*w11[5] + Gx1[36]*w11[6] + Gx1[37]*w11[7] + Gx1[38]*w11[8] + Gx1[39]*w11[9]; w12[4] += + Gx1[40]*w11[0] + Gx1[41]*w11[1] + Gx1[42]*w11[2] + Gx1[43]*w11[3] + Gx1[44]*w11[4] + Gx1[45]*w11[5] + Gx1[46]*w11[6] + Gx1[47]*w11[7] + Gx1[48]*w11[8] + Gx1[49]*w11[9]; w12[5] += + Gx1[50]*w11[0] + Gx1[51]*w11[1] + Gx1[52]*w11[2] + Gx1[53]*w11[3] + Gx1[54]*w11[4] + Gx1[55]*w11[5] + Gx1[56]*w11[6] + Gx1[57]*w11[7] + Gx1[58]*w11[8] + Gx1[59]*w11[9]; w12[6] += + Gx1[60]*w11[0] + Gx1[61]*w11[1] + Gx1[62]*w11[2] + Gx1[63]*w11[3] + Gx1[64]*w11[4] + Gx1[65]*w11[5] + Gx1[66]*w11[6] + Gx1[67]*w11[7] + Gx1[68]*w11[8] + Gx1[69]*w11[9]; w12[7] += + Gx1[70]*w11[0] + Gx1[71]*w11[1] + Gx1[72]*w11[2] + Gx1[73]*w11[3] + Gx1[74]*w11[4] + Gx1[75]*w11[5] + Gx1[76]*w11[6] + Gx1[77]*w11[7] + Gx1[78]*w11[8] + Gx1[79]*w11[9]; w12[8] += + Gx1[80]*w11[0] + Gx1[81]*w11[1] + Gx1[82]*w11[2] + Gx1[83]*w11[3] + Gx1[84]*w11[4] + Gx1[85]*w11[5] + Gx1[86]*w11[6] + Gx1[87]*w11[7] + Gx1[88]*w11[8] + Gx1[89]*w11[9]; w12[9] += + Gx1[90]*w11[0] + Gx1[91]*w11[1] + Gx1[92]*w11[2] + Gx1[93]*w11[3] + Gx1[94]*w11[4] + Gx1[95]*w11[5] + Gx1[96]*w11[6] + Gx1[97]*w11[7] + Gx1[98]*w11[8] + Gx1[99]*w11[9]; w12[0] += + Gu1[0]*U1[0] + Gu1[1]*U1[1] + Gu1[2]*U1[2] + Gu1[3]*U1[3]; w12[1] += + Gu1[4]*U1[0] + Gu1[5]*U1[1] + Gu1[6]*U1[2] + Gu1[7]*U1[3]; w12[2] += + Gu1[8]*U1[0] + Gu1[9]*U1[1] + Gu1[10]*U1[2] + Gu1[11]*U1[3]; w12[3] += + Gu1[12]*U1[0] + Gu1[13]*U1[1] + Gu1[14]*U1[2] + Gu1[15]*U1[3]; w12[4] += + Gu1[16]*U1[0] + Gu1[17]*U1[1] + Gu1[18]*U1[2] + Gu1[19]*U1[3]; w12[5] += + Gu1[20]*U1[0] + Gu1[21]*U1[1] + Gu1[22]*U1[2] + Gu1[23]*U1[3]; w12[6] += + Gu1[24]*U1[0] + 
Gu1[25]*U1[1] + Gu1[26]*U1[2] + Gu1[27]*U1[3];
/* NOTE(review): this file appears to be ACADO Toolkit auto-generated condensing
 * code (acadoWorkspace / acadoVariables, hand-unrolled index arithmetic) —
 * changes normally belong in the code generator, not in this export. */
/* Continuation of a function that begins above this excerpt: the remaining rows
 * of an accumulation w12 += Gu1 * U1, where Gu1 is indexed as a 10x4 row-major
 * block and U1 is a length-4 vector (presumably state dim 10, control dim 4 —
 * consistent with the strides used throughout this file). */
w12[7] += + Gu1[28]*U1[0] + Gu1[29]*U1[1] + Gu1[30]*U1[2] + Gu1[31]*U1[3];
w12[8] += + Gu1[32]*U1[0] + Gu1[33]*U1[1] + Gu1[34]*U1[2] + Gu1[35]*U1[3];
w12[9] += + Gu1[36]*U1[0] + Gu1[37]*U1[1] + Gu1[38]*U1[2] + Gu1[39]*U1[3];
}

/* Copy the transpose of the 4x4 block at block-position (iCol, iRow) of the
 * condensed Hessian H into block-position (iRow, iCol).  H is stored row-major
 * with row stride 80 (20 blocks x 4 controls), so a block-row starts at
 * iRow * 320 = iRow * 4 * 80 and the four rows of a block are offset by
 * 0 / 80 / 160 / 240.  Used to mirror the upper triangle into the lower one
 * (or vice versa) after condensing. */
void acado_copyHTH( int iRow, int iCol )
{
acadoWorkspace.H[(iRow * 320) + (iCol * 4)] = acadoWorkspace.H[(iCol * 320) + (iRow * 4)];
acadoWorkspace.H[(iRow * 320) + (iCol * 4 + 1)] = acadoWorkspace.H[(iCol * 320 + 80) + (iRow * 4)];
acadoWorkspace.H[(iRow * 320) + (iCol * 4 + 2)] = acadoWorkspace.H[(iCol * 320 + 160) + (iRow * 4)];
acadoWorkspace.H[(iRow * 320) + (iCol * 4 + 3)] = acadoWorkspace.H[(iCol * 320 + 240) + (iRow * 4)];
acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4)] = acadoWorkspace.H[(iCol * 320) + (iRow * 4 + 1)];
acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4 + 1)] = acadoWorkspace.H[(iCol * 320 + 80) + (iRow * 4 + 1)];
acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4 + 2)] = acadoWorkspace.H[(iCol * 320 + 160) + (iRow * 4 + 1)];
acadoWorkspace.H[(iRow * 320 + 80) + (iCol * 4 + 3)] = acadoWorkspace.H[(iCol * 320 + 240) + (iRow * 4 + 1)];
acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4)] = acadoWorkspace.H[(iCol * 320) + (iRow * 4 + 2)];
acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4 + 1)] = acadoWorkspace.H[(iCol * 320 + 80) + (iRow * 4 + 2)];
acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4 + 2)] = acadoWorkspace.H[(iCol * 320 + 160) + (iRow * 4 + 2)];
acadoWorkspace.H[(iRow * 320 + 160) + (iCol * 4 + 3)] = acadoWorkspace.H[(iCol * 320 + 240) + (iRow * 4 + 2)];
acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4)] = acadoWorkspace.H[(iCol * 320) + (iRow * 4 + 3)];
acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4 + 1)] = acadoWorkspace.H[(iCol * 320 + 80) + (iRow * 4 + 3)];
acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4 + 2)] = acadoWorkspace.H[(iCol * 320 + 160) + (iRow * 4 + 3)];
acadoWorkspace.H[(iRow * 320 + 240) + (iCol * 4 + 3)] = acadoWorkspace.H[(iCol * 320 + 240) + (iRow * 4 + 3)];
}

/* Dense matrix-vector product RDy1 = R2 * Dy1, fully unrolled.
 * R2 is a 4x14 row-major matrix (rows at offsets 0, 14, 28, 42),
 * Dy1 a length-14 residual vector, RDy1 the length-4 result. */
void acado_multRDy( real_t* const R2, real_t* const Dy1, real_t* const RDy1 )
{
RDy1[0] = + R2[0]*Dy1[0] + R2[1]*Dy1[1] + R2[2]*Dy1[2] + R2[3]*Dy1[3] + R2[4]*Dy1[4] + R2[5]*Dy1[5] + R2[6]*Dy1[6] + R2[7]*Dy1[7] + R2[8]*Dy1[8] + R2[9]*Dy1[9] + R2[10]*Dy1[10] + R2[11]*Dy1[11] + R2[12]*Dy1[12] + R2[13]*Dy1[13];
RDy1[1] = + R2[14]*Dy1[0] + R2[15]*Dy1[1] + R2[16]*Dy1[2] + R2[17]*Dy1[3] + R2[18]*Dy1[4] + R2[19]*Dy1[5] + R2[20]*Dy1[6] + R2[21]*Dy1[7] + R2[22]*Dy1[8] + R2[23]*Dy1[9] + R2[24]*Dy1[10] + R2[25]*Dy1[11] + R2[26]*Dy1[12] + R2[27]*Dy1[13];
RDy1[2] = + R2[28]*Dy1[0] + R2[29]*Dy1[1] + R2[30]*Dy1[2] + R2[31]*Dy1[3] + R2[32]*Dy1[4] + R2[33]*Dy1[5] + R2[34]*Dy1[6] + R2[35]*Dy1[7] + R2[36]*Dy1[8] + R2[37]*Dy1[9] + R2[38]*Dy1[10] + R2[39]*Dy1[11] + R2[40]*Dy1[12] + R2[41]*Dy1[13];
RDy1[3] = + R2[42]*Dy1[0] + R2[43]*Dy1[1] + R2[44]*Dy1[2] + R2[45]*Dy1[3] + R2[46]*Dy1[4] + R2[47]*Dy1[5] + R2[48]*Dy1[6] + R2[49]*Dy1[7] + R2[50]*Dy1[8] + R2[51]*Dy1[9] + R2[52]*Dy1[10] + R2[53]*Dy1[11] + R2[54]*Dy1[12] + R2[55]*Dy1[13];
}

/* Dense matrix-vector product QDy1 = Q2 * Dy1, fully unrolled.
 * Q2 is a 10x14 row-major matrix (rows at offsets 0, 14, ..., 126),
 * Dy1 a length-14 residual vector, QDy1 the length-10 result. */
void acado_multQDy( real_t* const Q2, real_t* const Dy1, real_t* const QDy1 )
{
QDy1[0] = + Q2[0]*Dy1[0] + Q2[1]*Dy1[1] + Q2[2]*Dy1[2] + Q2[3]*Dy1[3] + Q2[4]*Dy1[4] + Q2[5]*Dy1[5] + Q2[6]*Dy1[6] + Q2[7]*Dy1[7] + Q2[8]*Dy1[8] + Q2[9]*Dy1[9] + Q2[10]*Dy1[10] + Q2[11]*Dy1[11] + Q2[12]*Dy1[12] + Q2[13]*Dy1[13];
QDy1[1] = + Q2[14]*Dy1[0] + Q2[15]*Dy1[1] + Q2[16]*Dy1[2] + Q2[17]*Dy1[3] + Q2[18]*Dy1[4] + Q2[19]*Dy1[5] + Q2[20]*Dy1[6] + Q2[21]*Dy1[7] + Q2[22]*Dy1[8] + Q2[23]*Dy1[9] + Q2[24]*Dy1[10] + Q2[25]*Dy1[11] + Q2[26]*Dy1[12] + Q2[27]*Dy1[13];
QDy1[2] = + Q2[28]*Dy1[0] + Q2[29]*Dy1[1] + Q2[30]*Dy1[2] + Q2[31]*Dy1[3] + Q2[32]*Dy1[4] + Q2[33]*Dy1[5] + Q2[34]*Dy1[6] + Q2[35]*Dy1[7] + Q2[36]*Dy1[8] + Q2[37]*Dy1[9] + Q2[38]*Dy1[10] + Q2[39]*Dy1[11] + Q2[40]*Dy1[12] + Q2[41]*Dy1[13];
QDy1[3] = + Q2[42]*Dy1[0] + Q2[43]*Dy1[1] + Q2[44]*Dy1[2] + Q2[45]*Dy1[3] + Q2[46]*Dy1[4] + Q2[47]*Dy1[5] + Q2[48]*Dy1[6] + Q2[49]*Dy1[7] + Q2[50]*Dy1[8] + Q2[51]*Dy1[9] + Q2[52]*Dy1[10] + Q2[53]*Dy1[11] + Q2[54]*Dy1[12] + Q2[55]*Dy1[13];
QDy1[4] = + Q2[56]*Dy1[0] + Q2[57]*Dy1[1] + Q2[58]*Dy1[2] + Q2[59]*Dy1[3] + Q2[60]*Dy1[4] + Q2[61]*Dy1[5] + Q2[62]*Dy1[6] + Q2[63]*Dy1[7] + Q2[64]*Dy1[8] + Q2[65]*Dy1[9] + Q2[66]*Dy1[10] + Q2[67]*Dy1[11] + Q2[68]*Dy1[12] + Q2[69]*Dy1[13];
QDy1[5] = + Q2[70]*Dy1[0] + Q2[71]*Dy1[1] + Q2[72]*Dy1[2] + Q2[73]*Dy1[3] + Q2[74]*Dy1[4] + Q2[75]*Dy1[5] + Q2[76]*Dy1[6] + Q2[77]*Dy1[7] + Q2[78]*Dy1[8] + Q2[79]*Dy1[9] + Q2[80]*Dy1[10] + Q2[81]*Dy1[11] + Q2[82]*Dy1[12] + Q2[83]*Dy1[13];
QDy1[6] = + Q2[84]*Dy1[0] + Q2[85]*Dy1[1] + Q2[86]*Dy1[2] + Q2[87]*Dy1[3] + Q2[88]*Dy1[4] + Q2[89]*Dy1[5] + Q2[90]*Dy1[6] + Q2[91]*Dy1[7] + Q2[92]*Dy1[8] + Q2[93]*Dy1[9] + Q2[94]*Dy1[10] + Q2[95]*Dy1[11] + Q2[96]*Dy1[12] + Q2[97]*Dy1[13];
QDy1[7] = + Q2[98]*Dy1[0] + Q2[99]*Dy1[1] + Q2[100]*Dy1[2] + Q2[101]*Dy1[3] + Q2[102]*Dy1[4] + Q2[103]*Dy1[5] + Q2[104]*Dy1[6] + Q2[105]*Dy1[7] + Q2[106]*Dy1[8] + Q2[107]*Dy1[9] + Q2[108]*Dy1[10] + Q2[109]*Dy1[11] + Q2[110]*Dy1[12] + Q2[111]*Dy1[13];
QDy1[8] = + Q2[112]*Dy1[0] + Q2[113]*Dy1[1] + Q2[114]*Dy1[2] + Q2[115]*Dy1[3] + Q2[116]*Dy1[4] + Q2[117]*Dy1[5] + Q2[118]*Dy1[6] + Q2[119]*Dy1[7] + Q2[120]*Dy1[8] + Q2[121]*Dy1[9] + Q2[122]*Dy1[10] + Q2[123]*Dy1[11] + Q2[124]*Dy1[12] + Q2[125]*Dy1[13];
QDy1[9] = + Q2[126]*Dy1[0] + Q2[127]*Dy1[1] + Q2[128]*Dy1[2] + Q2[129]*Dy1[3] + Q2[130]*Dy1[4] + Q2[131]*Dy1[5] + Q2[132]*Dy1[6] + Q2[133]*Dy1[7] + Q2[134]*Dy1[8] + Q2[135]*Dy1[9] + Q2[136]*Dy1[10] + Q2[137]*Dy1[11] + Q2[138]*Dy1[12] + Q2[139]*Dy1[13];
}

/* Write the 1x4 product (Hx * E) into row `row`, block-column `col` of the
 * constraint Jacobian acadoWorkspace.A (row stride 80).  Hx is a 1x10
 * constraint-gradient row; E is a 10x4 row-major sensitivity block, whose
 * column j is accessed with stride 4 as E[i*4 + j]. */
void acado_multHxE( real_t* const Hx, real_t* const E, int row, int col )
{
acadoWorkspace.A[(row * 80) + (col * 4)] = + Hx[0]*E[0] + Hx[1]*E[4] + Hx[2]*E[8] + Hx[3]*E[12] + Hx[4]*E[16] + Hx[5]*E[20] + Hx[6]*E[24] + Hx[7]*E[28] + Hx[8]*E[32] + Hx[9]*E[36];
acadoWorkspace.A[(row * 80) + (col * 4 + 1)] = + Hx[0]*E[1] + Hx[1]*E[5] + Hx[2]*E[9] + Hx[3]*E[13] + Hx[4]*E[17] + Hx[5]*E[21] + Hx[6]*E[25] + Hx[7]*E[29] + Hx[8]*E[33] + Hx[9]*E[37];
acadoWorkspace.A[(row * 80) + (col * 4 + 2)] = + Hx[0]*E[2] + Hx[1]*E[6] + Hx[2]*E[10] + Hx[3]*E[14] + Hx[4]*E[18] + Hx[5]*E[22] + Hx[6]*E[26] + Hx[7]*E[30] + Hx[8]*E[34] + Hx[9]*E[38];
acadoWorkspace.A[(row * 80) + (col * 4 + 3)] = + Hx[0]*E[3] + Hx[1]*E[7] + Hx[2]*E[11] + Hx[3]*E[15] + Hx[4]*E[19] + Hx[5]*E[23] + Hx[6]*E[27] + Hx[7]*E[31] + Hx[8]*E[35] + Hx[9]*E[39];
}

/* Accumulate the scalar dot product Hx . tmpd (Hx: 1x10 constraint gradient,
 * tmpd: length-10 vector) into acadoWorkspace.evHxd[0], then subtract it from
 * the given constraint bounds lbA[0] and ubA[0]. */
void acado_macHxd( real_t* const Hx, real_t* const tmpd, real_t* const lbA, real_t* const ubA )
{
acadoWorkspace.evHxd[0] = + Hx[0]*tmpd[0] + Hx[1]*tmpd[1] + Hx[2]*tmpd[2] + Hx[3]*tmpd[3] + Hx[4]*tmpd[4] + Hx[5]*tmpd[5] + Hx[6]*tmpd[6] + Hx[7]*tmpd[7] + Hx[8]*tmpd[8] + Hx[9]*tmpd[9];
lbA[0] -= acadoWorkspace.evHxd[0];
ubA[0] -= acadoWorkspace.evHxd[0];
}

/* Condensing preparation step.  The visible opening below builds the stacked
 * state-transition matrix C by forward propagation: C_0 = evGx_0 (via
 * acado_moveGxT), then C_k = evGx_k * C_{k-1} for k = 1..19, with each 100-float
 * offset one 10x10 block.  The remainder of this function continues beyond this
 * excerpt. */
void acado_condensePrep(  )
{
int lRun1;
int lRun2;
int lRun3;
acado_moveGxT( acadoWorkspace.evGx, acadoWorkspace.C );
acado_multGxGx( &(acadoWorkspace.evGx[ 100 ]), acadoWorkspace.C, &(acadoWorkspace.C[ 100 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 200 ]), &(acadoWorkspace.C[ 100 ]), &(acadoWorkspace.C[ 200 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 300 ]), &(acadoWorkspace.C[ 200 ]), &(acadoWorkspace.C[ 300 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 400 ]), &(acadoWorkspace.C[ 300 ]), &(acadoWorkspace.C[ 400 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 500 ]), &(acadoWorkspace.C[ 400 ]), &(acadoWorkspace.C[ 500 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 600 ]), &(acadoWorkspace.C[ 500 ]), &(acadoWorkspace.C[ 600 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 700 ]), &(acadoWorkspace.C[ 600 ]), &(acadoWorkspace.C[ 700 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 800 ]), &(acadoWorkspace.C[ 700 ]), &(acadoWorkspace.C[ 800 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 900 ]), &(acadoWorkspace.C[ 800 ]), &(acadoWorkspace.C[ 900 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 1000 ]), &(acadoWorkspace.C[ 900 ]), &(acadoWorkspace.C[ 1000 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 1100 ]), &(acadoWorkspace.C[ 1000 ]), &(acadoWorkspace.C[ 1100 ]) );
acado_multGxGx( &(acadoWorkspace.evGx[ 1200 ]), &(acadoWorkspace.C[ 1100 ]), &(acadoWorkspace.C[ 1200 ]) ); acado_multGxGx( &(acadoWorkspace.evGx[ 1300 ]), &(acadoWorkspace.C[ 1200 ]), &(acadoWorkspace.C[ 1300 ]) ); acado_multGxGx( &(acadoWorkspace.evGx[ 1400 ]), &(acadoWorkspace.C[ 1300 ]), &(acadoWorkspace.C[ 1400 ]) ); acado_multGxGx( &(acadoWorkspace.evGx[ 1500 ]), &(acadoWorkspace.C[ 1400 ]), &(acadoWorkspace.C[ 1500 ]) ); acado_multGxGx( &(acadoWorkspace.evGx[ 1600 ]), &(acadoWorkspace.C[ 1500 ]), &(acadoWorkspace.C[ 1600 ]) ); acado_multGxGx( &(acadoWorkspace.evGx[ 1700 ]), &(acadoWorkspace.C[ 1600 ]), &(acadoWorkspace.C[ 1700 ]) ); acado_multGxGx( &(acadoWorkspace.evGx[ 1800 ]), &(acadoWorkspace.C[ 1700 ]), &(acadoWorkspace.C[ 1800 ]) ); acado_multGxGx( &(acadoWorkspace.evGx[ 1900 ]), &(acadoWorkspace.C[ 1800 ]), &(acadoWorkspace.C[ 1900 ]) ); for (lRun2 = 0; lRun2 < 20; ++lRun2) { lRun3 = ((lRun2) * (lRun2 * -1 + 41)) / (2); acado_moveGuE( &(acadoWorkspace.evGu[ lRun2 * 40 ]), &(acadoWorkspace.E[ lRun3 * 40 ]) ); for (lRun1 = 1; lRun1 < lRun2 * -1 + 20; ++lRun1) { acado_multGxGu( &(acadoWorkspace.evGx[ ((((lRun2) + (lRun1)) * (10)) * (10)) + (0) ]), &(acadoWorkspace.E[ (((((lRun3) + (lRun1)) - (1)) * (10)) * (4)) + (0) ]), &(acadoWorkspace.E[ ((((lRun3) + (lRun1)) * (10)) * (4)) + (0) ]) ); } acado_multGxGu( acadoWorkspace.QN1, &(acadoWorkspace.E[ ((((((lRun3) - (lRun2)) + (20)) - (1)) * (10)) * (4)) + (0) ]), acadoWorkspace.W1 ); for (lRun1 = 19; lRun2 < lRun1; --lRun1) { acado_multBTW1( &(acadoWorkspace.evGu[ lRun1 * 40 ]), acadoWorkspace.W1, lRun1, lRun2 ); acado_multGxTGu( &(acadoWorkspace.evGx[ lRun1 * 100 ]), acadoWorkspace.W1, acadoWorkspace.W2 ); acado_multQEW2( &(acadoWorkspace.Q1[ lRun1 * 100 ]), &(acadoWorkspace.E[ ((((((lRun3) + (lRun1)) - (lRun2)) - (1)) * (10)) * (4)) + (0) ]), acadoWorkspace.W2, acadoWorkspace.W1 ); } acado_multBTW1_R1( &(acadoWorkspace.R1[ lRun2 * 16 ]), &(acadoWorkspace.evGu[ lRun2 * 40 ]), acadoWorkspace.W1, lRun2 ); } 
acado_copyHTH( 0, 1 ); acado_copyHTH( 0, 2 ); acado_copyHTH( 1, 2 ); acado_copyHTH( 0, 3 ); acado_copyHTH( 1, 3 ); acado_copyHTH( 2, 3 ); acado_copyHTH( 0, 4 ); acado_copyHTH( 1, 4 ); acado_copyHTH( 2, 4 ); acado_copyHTH( 3, 4 ); acado_copyHTH( 0, 5 ); acado_copyHTH( 1, 5 ); acado_copyHTH( 2, 5 ); acado_copyHTH( 3, 5 ); acado_copyHTH( 4, 5 ); acado_copyHTH( 0, 6 ); acado_copyHTH( 1, 6 ); acado_copyHTH( 2, 6 ); acado_copyHTH( 3, 6 ); acado_copyHTH( 4, 6 ); acado_copyHTH( 5, 6 ); acado_copyHTH( 0, 7 ); acado_copyHTH( 1, 7 ); acado_copyHTH( 2, 7 ); acado_copyHTH( 3, 7 ); acado_copyHTH( 4, 7 ); acado_copyHTH( 5, 7 ); acado_copyHTH( 6, 7 ); acado_copyHTH( 0, 8 ); acado_copyHTH( 1, 8 ); acado_copyHTH( 2, 8 ); acado_copyHTH( 3, 8 ); acado_copyHTH( 4, 8 ); acado_copyHTH( 5, 8 ); acado_copyHTH( 6, 8 ); acado_copyHTH( 7, 8 ); acado_copyHTH( 0, 9 ); acado_copyHTH( 1, 9 ); acado_copyHTH( 2, 9 ); acado_copyHTH( 3, 9 ); acado_copyHTH( 4, 9 ); acado_copyHTH( 5, 9 ); acado_copyHTH( 6, 9 ); acado_copyHTH( 7, 9 ); acado_copyHTH( 8, 9 ); acado_copyHTH( 0, 10 ); acado_copyHTH( 1, 10 ); acado_copyHTH( 2, 10 ); acado_copyHTH( 3, 10 ); acado_copyHTH( 4, 10 ); acado_copyHTH( 5, 10 ); acado_copyHTH( 6, 10 ); acado_copyHTH( 7, 10 ); acado_copyHTH( 8, 10 ); acado_copyHTH( 9, 10 ); acado_copyHTH( 0, 11 ); acado_copyHTH( 1, 11 ); acado_copyHTH( 2, 11 ); acado_copyHTH( 3, 11 ); acado_copyHTH( 4, 11 ); acado_copyHTH( 5, 11 ); acado_copyHTH( 6, 11 ); acado_copyHTH( 7, 11 ); acado_copyHTH( 8, 11 ); acado_copyHTH( 9, 11 ); acado_copyHTH( 10, 11 ); acado_copyHTH( 0, 12 ); acado_copyHTH( 1, 12 ); acado_copyHTH( 2, 12 ); acado_copyHTH( 3, 12 ); acado_copyHTH( 4, 12 ); acado_copyHTH( 5, 12 ); acado_copyHTH( 6, 12 ); acado_copyHTH( 7, 12 ); acado_copyHTH( 8, 12 ); acado_copyHTH( 9, 12 ); acado_copyHTH( 10, 12 ); acado_copyHTH( 11, 12 ); acado_copyHTH( 0, 13 ); acado_copyHTH( 1, 13 ); acado_copyHTH( 2, 13 ); acado_copyHTH( 3, 13 ); acado_copyHTH( 4, 13 ); acado_copyHTH( 5, 13 ); acado_copyHTH( 6, 13 ); 
acado_copyHTH( 7, 13 ); acado_copyHTH( 8, 13 ); acado_copyHTH( 9, 13 ); acado_copyHTH( 10, 13 ); acado_copyHTH( 11, 13 ); acado_copyHTH( 12, 13 ); acado_copyHTH( 0, 14 ); acado_copyHTH( 1, 14 ); acado_copyHTH( 2, 14 ); acado_copyHTH( 3, 14 ); acado_copyHTH( 4, 14 ); acado_copyHTH( 5, 14 ); acado_copyHTH( 6, 14 ); acado_copyHTH( 7, 14 ); acado_copyHTH( 8, 14 ); acado_copyHTH( 9, 14 ); acado_copyHTH( 10, 14 ); acado_copyHTH( 11, 14 ); acado_copyHTH( 12, 14 ); acado_copyHTH( 13, 14 ); acado_copyHTH( 0, 15 ); acado_copyHTH( 1, 15 ); acado_copyHTH( 2, 15 ); acado_copyHTH( 3, 15 ); acado_copyHTH( 4, 15 ); acado_copyHTH( 5, 15 ); acado_copyHTH( 6, 15 ); acado_copyHTH( 7, 15 ); acado_copyHTH( 8, 15 ); acado_copyHTH( 9, 15 ); acado_copyHTH( 10, 15 ); acado_copyHTH( 11, 15 ); acado_copyHTH( 12, 15 ); acado_copyHTH( 13, 15 ); acado_copyHTH( 14, 15 ); acado_copyHTH( 0, 16 ); acado_copyHTH( 1, 16 ); acado_copyHTH( 2, 16 ); acado_copyHTH( 3, 16 ); acado_copyHTH( 4, 16 ); acado_copyHTH( 5, 16 ); acado_copyHTH( 6, 16 ); acado_copyHTH( 7, 16 ); acado_copyHTH( 8, 16 ); acado_copyHTH( 9, 16 ); acado_copyHTH( 10, 16 ); acado_copyHTH( 11, 16 ); acado_copyHTH( 12, 16 ); acado_copyHTH( 13, 16 ); acado_copyHTH( 14, 16 ); acado_copyHTH( 15, 16 ); acado_copyHTH( 0, 17 ); acado_copyHTH( 1, 17 ); acado_copyHTH( 2, 17 ); acado_copyHTH( 3, 17 ); acado_copyHTH( 4, 17 ); acado_copyHTH( 5, 17 ); acado_copyHTH( 6, 17 ); acado_copyHTH( 7, 17 ); acado_copyHTH( 8, 17 ); acado_copyHTH( 9, 17 ); acado_copyHTH( 10, 17 ); acado_copyHTH( 11, 17 ); acado_copyHTH( 12, 17 ); acado_copyHTH( 13, 17 ); acado_copyHTH( 14, 17 ); acado_copyHTH( 15, 17 ); acado_copyHTH( 16, 17 ); acado_copyHTH( 0, 18 ); acado_copyHTH( 1, 18 ); acado_copyHTH( 2, 18 ); acado_copyHTH( 3, 18 ); acado_copyHTH( 4, 18 ); acado_copyHTH( 5, 18 ); acado_copyHTH( 6, 18 ); acado_copyHTH( 7, 18 ); acado_copyHTH( 8, 18 ); acado_copyHTH( 9, 18 ); acado_copyHTH( 10, 18 ); acado_copyHTH( 11, 18 ); acado_copyHTH( 12, 18 ); acado_copyHTH( 13, 18 ); 
acado_copyHTH( 14, 18 ); acado_copyHTH( 15, 18 ); acado_copyHTH( 16, 18 ); acado_copyHTH( 17, 18 ); acado_copyHTH( 0, 19 ); acado_copyHTH( 1, 19 ); acado_copyHTH( 2, 19 ); acado_copyHTH( 3, 19 ); acado_copyHTH( 4, 19 ); acado_copyHTH( 5, 19 ); acado_copyHTH( 6, 19 ); acado_copyHTH( 7, 19 ); acado_copyHTH( 8, 19 ); acado_copyHTH( 9, 19 ); acado_copyHTH( 10, 19 ); acado_copyHTH( 11, 19 ); acado_copyHTH( 12, 19 ); acado_copyHTH( 13, 19 ); acado_copyHTH( 14, 19 ); acado_copyHTH( 15, 19 ); acado_copyHTH( 16, 19 ); acado_copyHTH( 17, 19 ); acado_copyHTH( 18, 19 ); for (lRun2 = 0; lRun2 < 200; ++lRun2) acadoWorkspace.sbar[lRun2 + 10] = acadoWorkspace.d[lRun2]; for (lRun1 = 0; lRun1 < 20; ++lRun1) { acadoWorkspace.conValueIn[0] = acadoVariables.x[lRun1 * 10]; acadoWorkspace.conValueIn[1] = acadoVariables.x[lRun1 * 10 + 1]; acadoWorkspace.conValueIn[2] = acadoVariables.x[lRun1 * 10 + 2]; acadoWorkspace.conValueIn[3] = acadoVariables.x[lRun1 * 10 + 3]; acadoWorkspace.conValueIn[4] = acadoVariables.x[lRun1 * 10 + 4]; acadoWorkspace.conValueIn[5] = acadoVariables.x[lRun1 * 10 + 5]; acadoWorkspace.conValueIn[6] = acadoVariables.x[lRun1 * 10 + 6]; acadoWorkspace.conValueIn[7] = acadoVariables.x[lRun1 * 10 + 7]; acadoWorkspace.conValueIn[8] = acadoVariables.x[lRun1 * 10 + 8]; acadoWorkspace.conValueIn[9] = acadoVariables.x[lRun1 * 10 + 9]; acadoWorkspace.conValueIn[10] = acadoVariables.u[lRun1 * 4]; acadoWorkspace.conValueIn[11] = acadoVariables.u[lRun1 * 4 + 1]; acadoWorkspace.conValueIn[12] = acadoVariables.u[lRun1 * 4 + 2]; acadoWorkspace.conValueIn[13] = acadoVariables.u[lRun1 * 4 + 3]; acadoWorkspace.conValueIn[14] = acadoVariables.od[lRun1 * 10]; acadoWorkspace.conValueIn[15] = acadoVariables.od[lRun1 * 10 + 1]; acadoWorkspace.conValueIn[16] = acadoVariables.od[lRun1 * 10 + 2]; acadoWorkspace.conValueIn[17] = acadoVariables.od[lRun1 * 10 + 3]; acadoWorkspace.conValueIn[18] = acadoVariables.od[lRun1 * 10 + 4]; acadoWorkspace.conValueIn[19] = acadoVariables.od[lRun1 * 10 + 
5]; acadoWorkspace.conValueIn[20] = acadoVariables.od[lRun1 * 10 + 6]; acadoWorkspace.conValueIn[21] = acadoVariables.od[lRun1 * 10 + 7]; acadoWorkspace.conValueIn[22] = acadoVariables.od[lRun1 * 10 + 8]; acadoWorkspace.conValueIn[23] = acadoVariables.od[lRun1 * 10 + 9]; acado_evaluatePathConstraints( acadoWorkspace.conValueIn, acadoWorkspace.conValueOut ); acadoWorkspace.evH[lRun1] = acadoWorkspace.conValueOut[0]; acadoWorkspace.evHx[lRun1 * 10] = acadoWorkspace.conValueOut[1]; acadoWorkspace.evHx[lRun1 * 10 + 1] = acadoWorkspace.conValueOut[2]; acadoWorkspace.evHx[lRun1 * 10 + 2] = acadoWorkspace.conValueOut[3]; acadoWorkspace.evHx[lRun1 * 10 + 3] = acadoWorkspace.conValueOut[4]; acadoWorkspace.evHx[lRun1 * 10 + 4] = acadoWorkspace.conValueOut[5]; acadoWorkspace.evHx[lRun1 * 10 + 5] = acadoWorkspace.conValueOut[6]; acadoWorkspace.evHx[lRun1 * 10 + 6] = acadoWorkspace.conValueOut[7]; acadoWorkspace.evHx[lRun1 * 10 + 7] = acadoWorkspace.conValueOut[8]; acadoWorkspace.evHx[lRun1 * 10 + 8] = acadoWorkspace.conValueOut[9]; acadoWorkspace.evHx[lRun1 * 10 + 9] = acadoWorkspace.conValueOut[10]; acadoWorkspace.evHu[lRun1 * 4] = acadoWorkspace.conValueOut[11]; acadoWorkspace.evHu[lRun1 * 4 + 1] = acadoWorkspace.conValueOut[12]; acadoWorkspace.evHu[lRun1 * 4 + 2] = acadoWorkspace.conValueOut[13]; acadoWorkspace.evHu[lRun1 * 4 + 3] = acadoWorkspace.conValueOut[14]; } acado_multHxE( &(acadoWorkspace.evHx[ 10 ]), acadoWorkspace.E, 1, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 20 ]), &(acadoWorkspace.E[ 40 ]), 2, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 20 ]), &(acadoWorkspace.E[ 800 ]), 2, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 30 ]), &(acadoWorkspace.E[ 80 ]), 3, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 30 ]), &(acadoWorkspace.E[ 840 ]), 3, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 30 ]), &(acadoWorkspace.E[ 1560 ]), 3, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 40 ]), &(acadoWorkspace.E[ 120 ]), 4, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 40 ]), 
&(acadoWorkspace.E[ 880 ]), 4, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 40 ]), &(acadoWorkspace.E[ 1600 ]), 4, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 40 ]), &(acadoWorkspace.E[ 2280 ]), 4, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 50 ]), &(acadoWorkspace.E[ 160 ]), 5, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 50 ]), &(acadoWorkspace.E[ 920 ]), 5, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 50 ]), &(acadoWorkspace.E[ 1640 ]), 5, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 50 ]), &(acadoWorkspace.E[ 2320 ]), 5, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 50 ]), &(acadoWorkspace.E[ 2960 ]), 5, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 60 ]), &(acadoWorkspace.E[ 200 ]), 6, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 60 ]), &(acadoWorkspace.E[ 960 ]), 6, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 60 ]), &(acadoWorkspace.E[ 1680 ]), 6, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 60 ]), &(acadoWorkspace.E[ 2360 ]), 6, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 60 ]), &(acadoWorkspace.E[ 3000 ]), 6, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 60 ]), &(acadoWorkspace.E[ 3600 ]), 6, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.E[ 240 ]), 7, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.E[ 1000 ]), 7, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.E[ 1720 ]), 7, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.E[ 2400 ]), 7, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.E[ 3040 ]), 7, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.E[ 3640 ]), 7, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.E[ 4200 ]), 7, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 280 ]), 8, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 1040 ]), 8, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 1760 ]), 8, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 2440 ]), 8, 3 ); 
acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 3080 ]), 8, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 3680 ]), 8, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 4240 ]), 8, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.E[ 4760 ]), 8, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 320 ]), 9, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 1080 ]), 9, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 1800 ]), 9, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 2480 ]), 9, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 3120 ]), 9, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 3720 ]), 9, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 4280 ]), 9, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 4800 ]), 9, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.E[ 5280 ]), 9, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 360 ]), 10, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 1120 ]), 10, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 1840 ]), 10, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 2520 ]), 10, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 3160 ]), 10, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 3760 ]), 10, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 4320 ]), 10, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 4840 ]), 10, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 5320 ]), 10, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.E[ 5760 ]), 10, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 400 ]), 11, 0 ); 
acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 1160 ]), 11, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 1880 ]), 11, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 2560 ]), 11, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 3200 ]), 11, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 3800 ]), 11, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 4360 ]), 11, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 4880 ]), 11, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 5360 ]), 11, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 5800 ]), 11, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.E[ 6200 ]), 11, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 440 ]), 12, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 1200 ]), 12, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 1920 ]), 12, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 2600 ]), 12, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 3240 ]), 12, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 3840 ]), 12, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 4400 ]), 12, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 4920 ]), 12, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 5400 ]), 12, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 5840 ]), 12, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 6240 ]), 12, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.E[ 6600 ]), 12, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 480 ]), 13, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 
1240 ]), 13, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 1960 ]), 13, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 2640 ]), 13, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 3280 ]), 13, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 3880 ]), 13, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 4440 ]), 13, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 4960 ]), 13, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 5440 ]), 13, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 5880 ]), 13, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 6280 ]), 13, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 6640 ]), 13, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.E[ 6960 ]), 13, 12 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 520 ]), 14, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 1280 ]), 14, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 2000 ]), 14, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 2680 ]), 14, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 3320 ]), 14, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 3920 ]), 14, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 4480 ]), 14, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 5000 ]), 14, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 5480 ]), 14, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 5920 ]), 14, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 6320 ]), 14, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 6680 ]), 14, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), 
&(acadoWorkspace.E[ 7000 ]), 14, 12 ); acado_multHxE( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.E[ 7280 ]), 14, 13 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 560 ]), 15, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 1320 ]), 15, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 2040 ]), 15, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 2720 ]), 15, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 3360 ]), 15, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 3960 ]), 15, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 4520 ]), 15, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 5040 ]), 15, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 5520 ]), 15, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 5960 ]), 15, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 6360 ]), 15, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 6720 ]), 15, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 7040 ]), 15, 12 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 7320 ]), 15, 13 ); acado_multHxE( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.E[ 7560 ]), 15, 14 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 600 ]), 16, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 1360 ]), 16, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 2080 ]), 16, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 2760 ]), 16, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 3400 ]), 16, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 4000 ]), 16, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 4560 ]), 16, 6 ); acado_multHxE( 
&(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 5080 ]), 16, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 5560 ]), 16, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 6000 ]), 16, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 6400 ]), 16, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 6760 ]), 16, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 7080 ]), 16, 12 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 7360 ]), 16, 13 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 7600 ]), 16, 14 ); acado_multHxE( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.E[ 7800 ]), 16, 15 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 640 ]), 17, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 1400 ]), 17, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 2120 ]), 17, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 2800 ]), 17, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 3440 ]), 17, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 4040 ]), 17, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 4600 ]), 17, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 5120 ]), 17, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 5600 ]), 17, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 6040 ]), 17, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 6440 ]), 17, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 6800 ]), 17, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 7120 ]), 17, 12 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 7400 ]), 17, 13 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 7640 
]), 17, 14 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 7840 ]), 17, 15 ); acado_multHxE( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.E[ 8000 ]), 17, 16 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 680 ]), 18, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 1440 ]), 18, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 2160 ]), 18, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 2840 ]), 18, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 3480 ]), 18, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 4080 ]), 18, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 4640 ]), 18, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 5160 ]), 18, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 5640 ]), 18, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 6080 ]), 18, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 6480 ]), 18, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 6840 ]), 18, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 7160 ]), 18, 12 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 7440 ]), 18, 13 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 7680 ]), 18, 14 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 7880 ]), 18, 15 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 8040 ]), 18, 16 ); acado_multHxE( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.E[ 8160 ]), 18, 17 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 720 ]), 19, 0 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 1480 ]), 19, 1 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 2200 ]), 19, 2 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), 
&(acadoWorkspace.E[ 2880 ]), 19, 3 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 3520 ]), 19, 4 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 4120 ]), 19, 5 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 4680 ]), 19, 6 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 5200 ]), 19, 7 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 5680 ]), 19, 8 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 6120 ]), 19, 9 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 6520 ]), 19, 10 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 6880 ]), 19, 11 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 7200 ]), 19, 12 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 7480 ]), 19, 13 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 7720 ]), 19, 14 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 7920 ]), 19, 15 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 8080 ]), 19, 16 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 8200 ]), 19, 17 ); acado_multHxE( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.E[ 8280 ]), 19, 18 ); acadoWorkspace.A[0] = acadoWorkspace.evHu[0]; acadoWorkspace.A[1] = acadoWorkspace.evHu[1]; acadoWorkspace.A[2] = acadoWorkspace.evHu[2]; acadoWorkspace.A[3] = acadoWorkspace.evHu[3]; acadoWorkspace.A[84] = acadoWorkspace.evHu[4]; acadoWorkspace.A[85] = acadoWorkspace.evHu[5]; acadoWorkspace.A[86] = acadoWorkspace.evHu[6]; acadoWorkspace.A[87] = acadoWorkspace.evHu[7]; acadoWorkspace.A[168] = acadoWorkspace.evHu[8]; acadoWorkspace.A[169] = acadoWorkspace.evHu[9]; acadoWorkspace.A[170] = acadoWorkspace.evHu[10]; acadoWorkspace.A[171] = acadoWorkspace.evHu[11]; acadoWorkspace.A[252] = acadoWorkspace.evHu[12]; acadoWorkspace.A[253] = acadoWorkspace.evHu[13]; acadoWorkspace.A[254] = 
acadoWorkspace.evHu[14]; acadoWorkspace.A[255] = acadoWorkspace.evHu[15]; acadoWorkspace.A[336] = acadoWorkspace.evHu[16]; acadoWorkspace.A[337] = acadoWorkspace.evHu[17]; acadoWorkspace.A[338] = acadoWorkspace.evHu[18]; acadoWorkspace.A[339] = acadoWorkspace.evHu[19]; acadoWorkspace.A[420] = acadoWorkspace.evHu[20]; acadoWorkspace.A[421] = acadoWorkspace.evHu[21]; acadoWorkspace.A[422] = acadoWorkspace.evHu[22]; acadoWorkspace.A[423] = acadoWorkspace.evHu[23]; acadoWorkspace.A[504] = acadoWorkspace.evHu[24]; acadoWorkspace.A[505] = acadoWorkspace.evHu[25]; acadoWorkspace.A[506] = acadoWorkspace.evHu[26]; acadoWorkspace.A[507] = acadoWorkspace.evHu[27]; acadoWorkspace.A[588] = acadoWorkspace.evHu[28]; acadoWorkspace.A[589] = acadoWorkspace.evHu[29]; acadoWorkspace.A[590] = acadoWorkspace.evHu[30]; acadoWorkspace.A[591] = acadoWorkspace.evHu[31]; acadoWorkspace.A[672] = acadoWorkspace.evHu[32]; acadoWorkspace.A[673] = acadoWorkspace.evHu[33]; acadoWorkspace.A[674] = acadoWorkspace.evHu[34]; acadoWorkspace.A[675] = acadoWorkspace.evHu[35]; acadoWorkspace.A[756] = acadoWorkspace.evHu[36]; acadoWorkspace.A[757] = acadoWorkspace.evHu[37]; acadoWorkspace.A[758] = acadoWorkspace.evHu[38]; acadoWorkspace.A[759] = acadoWorkspace.evHu[39]; acadoWorkspace.A[840] = acadoWorkspace.evHu[40]; acadoWorkspace.A[841] = acadoWorkspace.evHu[41]; acadoWorkspace.A[842] = acadoWorkspace.evHu[42]; acadoWorkspace.A[843] = acadoWorkspace.evHu[43]; acadoWorkspace.A[924] = acadoWorkspace.evHu[44]; acadoWorkspace.A[925] = acadoWorkspace.evHu[45]; acadoWorkspace.A[926] = acadoWorkspace.evHu[46]; acadoWorkspace.A[927] = acadoWorkspace.evHu[47]; acadoWorkspace.A[1008] = acadoWorkspace.evHu[48]; acadoWorkspace.A[1009] = acadoWorkspace.evHu[49]; acadoWorkspace.A[1010] = acadoWorkspace.evHu[50]; acadoWorkspace.A[1011] = acadoWorkspace.evHu[51]; acadoWorkspace.A[1092] = acadoWorkspace.evHu[52]; acadoWorkspace.A[1093] = acadoWorkspace.evHu[53]; acadoWorkspace.A[1094] = acadoWorkspace.evHu[54]; 
acadoWorkspace.A[1095] = acadoWorkspace.evHu[55]; acadoWorkspace.A[1176] = acadoWorkspace.evHu[56]; acadoWorkspace.A[1177] = acadoWorkspace.evHu[57]; acadoWorkspace.A[1178] = acadoWorkspace.evHu[58]; acadoWorkspace.A[1179] = acadoWorkspace.evHu[59]; acadoWorkspace.A[1260] = acadoWorkspace.evHu[60]; acadoWorkspace.A[1261] = acadoWorkspace.evHu[61]; acadoWorkspace.A[1262] = acadoWorkspace.evHu[62]; acadoWorkspace.A[1263] = acadoWorkspace.evHu[63]; acadoWorkspace.A[1344] = acadoWorkspace.evHu[64]; acadoWorkspace.A[1345] = acadoWorkspace.evHu[65]; acadoWorkspace.A[1346] = acadoWorkspace.evHu[66]; acadoWorkspace.A[1347] = acadoWorkspace.evHu[67]; acadoWorkspace.A[1428] = acadoWorkspace.evHu[68]; acadoWorkspace.A[1429] = acadoWorkspace.evHu[69]; acadoWorkspace.A[1430] = acadoWorkspace.evHu[70]; acadoWorkspace.A[1431] = acadoWorkspace.evHu[71]; acadoWorkspace.A[1512] = acadoWorkspace.evHu[72]; acadoWorkspace.A[1513] = acadoWorkspace.evHu[73]; acadoWorkspace.A[1514] = acadoWorkspace.evHu[74]; acadoWorkspace.A[1515] = acadoWorkspace.evHu[75]; acadoWorkspace.A[1596] = acadoWorkspace.evHu[76]; acadoWorkspace.A[1597] = acadoWorkspace.evHu[77]; acadoWorkspace.A[1598] = acadoWorkspace.evHu[78]; acadoWorkspace.A[1599] = acadoWorkspace.evHu[79]; acadoWorkspace.lbA[0] = acadoVariables.lbAValues[0] - acadoWorkspace.evH[0]; acadoWorkspace.lbA[1] = acadoVariables.lbAValues[1] - acadoWorkspace.evH[1]; acadoWorkspace.lbA[2] = acadoVariables.lbAValues[2] - acadoWorkspace.evH[2]; acadoWorkspace.lbA[3] = acadoVariables.lbAValues[3] - acadoWorkspace.evH[3]; acadoWorkspace.lbA[4] = acadoVariables.lbAValues[4] - acadoWorkspace.evH[4]; acadoWorkspace.lbA[5] = acadoVariables.lbAValues[5] - acadoWorkspace.evH[5]; acadoWorkspace.lbA[6] = acadoVariables.lbAValues[6] - acadoWorkspace.evH[6]; acadoWorkspace.lbA[7] = acadoVariables.lbAValues[7] - acadoWorkspace.evH[7]; acadoWorkspace.lbA[8] = acadoVariables.lbAValues[8] - acadoWorkspace.evH[8]; acadoWorkspace.lbA[9] = acadoVariables.lbAValues[9] - 
acadoWorkspace.evH[9]; acadoWorkspace.lbA[10] = acadoVariables.lbAValues[10] - acadoWorkspace.evH[10]; acadoWorkspace.lbA[11] = acadoVariables.lbAValues[11] - acadoWorkspace.evH[11]; acadoWorkspace.lbA[12] = acadoVariables.lbAValues[12] - acadoWorkspace.evH[12]; acadoWorkspace.lbA[13] = acadoVariables.lbAValues[13] - acadoWorkspace.evH[13]; acadoWorkspace.lbA[14] = acadoVariables.lbAValues[14] - acadoWorkspace.evH[14]; acadoWorkspace.lbA[15] = acadoVariables.lbAValues[15] - acadoWorkspace.evH[15]; acadoWorkspace.lbA[16] = acadoVariables.lbAValues[16] - acadoWorkspace.evH[16]; acadoWorkspace.lbA[17] = acadoVariables.lbAValues[17] - acadoWorkspace.evH[17]; acadoWorkspace.lbA[18] = acadoVariables.lbAValues[18] - acadoWorkspace.evH[18]; acadoWorkspace.lbA[19] = acadoVariables.lbAValues[19] - acadoWorkspace.evH[19]; acadoWorkspace.ubA[0] = acadoVariables.ubAValues[0] - acadoWorkspace.evH[0]; acadoWorkspace.ubA[1] = acadoVariables.ubAValues[1] - acadoWorkspace.evH[1]; acadoWorkspace.ubA[2] = acadoVariables.ubAValues[2] - acadoWorkspace.evH[2]; acadoWorkspace.ubA[3] = acadoVariables.ubAValues[3] - acadoWorkspace.evH[3]; acadoWorkspace.ubA[4] = acadoVariables.ubAValues[4] - acadoWorkspace.evH[4]; acadoWorkspace.ubA[5] = acadoVariables.ubAValues[5] - acadoWorkspace.evH[5]; acadoWorkspace.ubA[6] = acadoVariables.ubAValues[6] - acadoWorkspace.evH[6]; acadoWorkspace.ubA[7] = acadoVariables.ubAValues[7] - acadoWorkspace.evH[7]; acadoWorkspace.ubA[8] = acadoVariables.ubAValues[8] - acadoWorkspace.evH[8]; acadoWorkspace.ubA[9] = acadoVariables.ubAValues[9] - acadoWorkspace.evH[9]; acadoWorkspace.ubA[10] = acadoVariables.ubAValues[10] - acadoWorkspace.evH[10]; acadoWorkspace.ubA[11] = acadoVariables.ubAValues[11] - acadoWorkspace.evH[11]; acadoWorkspace.ubA[12] = acadoVariables.ubAValues[12] - acadoWorkspace.evH[12]; acadoWorkspace.ubA[13] = acadoVariables.ubAValues[13] - acadoWorkspace.evH[13]; acadoWorkspace.ubA[14] = acadoVariables.ubAValues[14] - acadoWorkspace.evH[14]; 
acadoWorkspace.ubA[15] = acadoVariables.ubAValues[15] - acadoWorkspace.evH[15]; acadoWorkspace.ubA[16] = acadoVariables.ubAValues[16] - acadoWorkspace.evH[16]; acadoWorkspace.ubA[17] = acadoVariables.ubAValues[17] - acadoWorkspace.evH[17]; acadoWorkspace.ubA[18] = acadoVariables.ubAValues[18] - acadoWorkspace.evH[18]; acadoWorkspace.ubA[19] = acadoVariables.ubAValues[19] - acadoWorkspace.evH[19]; } void acado_condenseFdb( ) { int lRun1; acadoWorkspace.Dx0[0] = acadoVariables.x0[0] - acadoVariables.x[0]; acadoWorkspace.Dx0[1] = acadoVariables.x0[1] - acadoVariables.x[1]; acadoWorkspace.Dx0[2] = acadoVariables.x0[2] - acadoVariables.x[2]; acadoWorkspace.Dx0[3] = acadoVariables.x0[3] - acadoVariables.x[3]; acadoWorkspace.Dx0[4] = acadoVariables.x0[4] - acadoVariables.x[4]; acadoWorkspace.Dx0[5] = acadoVariables.x0[5] - acadoVariables.x[5]; acadoWorkspace.Dx0[6] = acadoVariables.x0[6] - acadoVariables.x[6]; acadoWorkspace.Dx0[7] = acadoVariables.x0[7] - acadoVariables.x[7]; acadoWorkspace.Dx0[8] = acadoVariables.x0[8] - acadoVariables.x[8]; acadoWorkspace.Dx0[9] = acadoVariables.x0[9] - acadoVariables.x[9]; for (lRun1 = 0; lRun1 < 280; ++lRun1) acadoWorkspace.Dy[lRun1] -= acadoVariables.y[lRun1]; acadoWorkspace.DyN[0] -= acadoVariables.yN[0]; acadoWorkspace.DyN[1] -= acadoVariables.yN[1]; acadoWorkspace.DyN[2] -= acadoVariables.yN[2]; acadoWorkspace.DyN[3] -= acadoVariables.yN[3]; acadoWorkspace.DyN[4] -= acadoVariables.yN[4]; acadoWorkspace.DyN[5] -= acadoVariables.yN[5]; acadoWorkspace.DyN[6] -= acadoVariables.yN[6]; acadoWorkspace.DyN[7] -= acadoVariables.yN[7]; acadoWorkspace.DyN[8] -= acadoVariables.yN[8]; acadoWorkspace.DyN[9] -= acadoVariables.yN[9]; acado_multRDy( acadoWorkspace.R2, acadoWorkspace.Dy, acadoWorkspace.g ); acado_multRDy( &(acadoWorkspace.R2[ 56 ]), &(acadoWorkspace.Dy[ 14 ]), &(acadoWorkspace.g[ 4 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 112 ]), &(acadoWorkspace.Dy[ 28 ]), &(acadoWorkspace.g[ 8 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 168 ]), 
&(acadoWorkspace.Dy[ 42 ]), &(acadoWorkspace.g[ 12 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 224 ]), &(acadoWorkspace.Dy[ 56 ]), &(acadoWorkspace.g[ 16 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 280 ]), &(acadoWorkspace.Dy[ 70 ]), &(acadoWorkspace.g[ 20 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 336 ]), &(acadoWorkspace.Dy[ 84 ]), &(acadoWorkspace.g[ 24 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 392 ]), &(acadoWorkspace.Dy[ 98 ]), &(acadoWorkspace.g[ 28 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 448 ]), &(acadoWorkspace.Dy[ 112 ]), &(acadoWorkspace.g[ 32 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 504 ]), &(acadoWorkspace.Dy[ 126 ]), &(acadoWorkspace.g[ 36 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 560 ]), &(acadoWorkspace.Dy[ 140 ]), &(acadoWorkspace.g[ 40 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 616 ]), &(acadoWorkspace.Dy[ 154 ]), &(acadoWorkspace.g[ 44 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 672 ]), &(acadoWorkspace.Dy[ 168 ]), &(acadoWorkspace.g[ 48 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 728 ]), &(acadoWorkspace.Dy[ 182 ]), &(acadoWorkspace.g[ 52 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 784 ]), &(acadoWorkspace.Dy[ 196 ]), &(acadoWorkspace.g[ 56 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 840 ]), &(acadoWorkspace.Dy[ 210 ]), &(acadoWorkspace.g[ 60 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 896 ]), &(acadoWorkspace.Dy[ 224 ]), &(acadoWorkspace.g[ 64 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 952 ]), &(acadoWorkspace.Dy[ 238 ]), &(acadoWorkspace.g[ 68 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 1008 ]), &(acadoWorkspace.Dy[ 252 ]), &(acadoWorkspace.g[ 72 ]) ); acado_multRDy( &(acadoWorkspace.R2[ 1064 ]), &(acadoWorkspace.Dy[ 266 ]), &(acadoWorkspace.g[ 76 ]) ); acado_multQDy( acadoWorkspace.Q2, acadoWorkspace.Dy, acadoWorkspace.QDy ); acado_multQDy( &(acadoWorkspace.Q2[ 140 ]), &(acadoWorkspace.Dy[ 14 ]), &(acadoWorkspace.QDy[ 10 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 280 ]), &(acadoWorkspace.Dy[ 28 ]), &(acadoWorkspace.QDy[ 20 ]) ); acado_multQDy( 
&(acadoWorkspace.Q2[ 420 ]), &(acadoWorkspace.Dy[ 42 ]), &(acadoWorkspace.QDy[ 30 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 560 ]), &(acadoWorkspace.Dy[ 56 ]), &(acadoWorkspace.QDy[ 40 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 700 ]), &(acadoWorkspace.Dy[ 70 ]), &(acadoWorkspace.QDy[ 50 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 840 ]), &(acadoWorkspace.Dy[ 84 ]), &(acadoWorkspace.QDy[ 60 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 980 ]), &(acadoWorkspace.Dy[ 98 ]), &(acadoWorkspace.QDy[ 70 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 1120 ]), &(acadoWorkspace.Dy[ 112 ]), &(acadoWorkspace.QDy[ 80 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 1260 ]), &(acadoWorkspace.Dy[ 126 ]), &(acadoWorkspace.QDy[ 90 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 1400 ]), &(acadoWorkspace.Dy[ 140 ]), &(acadoWorkspace.QDy[ 100 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 1540 ]), &(acadoWorkspace.Dy[ 154 ]), &(acadoWorkspace.QDy[ 110 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 1680 ]), &(acadoWorkspace.Dy[ 168 ]), &(acadoWorkspace.QDy[ 120 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 1820 ]), &(acadoWorkspace.Dy[ 182 ]), &(acadoWorkspace.QDy[ 130 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 1960 ]), &(acadoWorkspace.Dy[ 196 ]), &(acadoWorkspace.QDy[ 140 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 2100 ]), &(acadoWorkspace.Dy[ 210 ]), &(acadoWorkspace.QDy[ 150 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 2240 ]), &(acadoWorkspace.Dy[ 224 ]), &(acadoWorkspace.QDy[ 160 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 2380 ]), &(acadoWorkspace.Dy[ 238 ]), &(acadoWorkspace.QDy[ 170 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 2520 ]), &(acadoWorkspace.Dy[ 252 ]), &(acadoWorkspace.QDy[ 180 ]) ); acado_multQDy( &(acadoWorkspace.Q2[ 2660 ]), &(acadoWorkspace.Dy[ 266 ]), &(acadoWorkspace.QDy[ 190 ]) ); acadoWorkspace.QDy[200] = + acadoWorkspace.QN2[0]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[1]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[2]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[3]*acadoWorkspace.DyN[3] + 
acadoWorkspace.QN2[4]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[5]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[6]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[7]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[8]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[9]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[201] = + acadoWorkspace.QN2[10]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[11]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[12]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[13]*acadoWorkspace.DyN[3] + acadoWorkspace.QN2[14]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[15]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[16]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[17]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[18]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[19]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[202] = + acadoWorkspace.QN2[20]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[21]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[22]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[23]*acadoWorkspace.DyN[3] + acadoWorkspace.QN2[24]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[25]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[26]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[27]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[28]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[29]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[203] = + acadoWorkspace.QN2[30]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[31]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[32]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[33]*acadoWorkspace.DyN[3] + acadoWorkspace.QN2[34]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[35]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[36]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[37]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[38]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[39]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[204] = + acadoWorkspace.QN2[40]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[41]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[42]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[43]*acadoWorkspace.DyN[3] + 
acadoWorkspace.QN2[44]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[45]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[46]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[47]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[48]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[49]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[205] = + acadoWorkspace.QN2[50]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[51]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[52]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[53]*acadoWorkspace.DyN[3] + acadoWorkspace.QN2[54]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[55]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[56]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[57]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[58]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[59]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[206] = + acadoWorkspace.QN2[60]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[61]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[62]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[63]*acadoWorkspace.DyN[3] + acadoWorkspace.QN2[64]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[65]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[66]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[67]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[68]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[69]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[207] = + acadoWorkspace.QN2[70]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[71]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[72]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[73]*acadoWorkspace.DyN[3] + acadoWorkspace.QN2[74]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[75]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[76]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[77]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[78]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[79]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[208] = + acadoWorkspace.QN2[80]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[81]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[82]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[83]*acadoWorkspace.DyN[3] + 
acadoWorkspace.QN2[84]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[85]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[86]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[87]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[88]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[89]*acadoWorkspace.DyN[9]; acadoWorkspace.QDy[209] = + acadoWorkspace.QN2[90]*acadoWorkspace.DyN[0] + acadoWorkspace.QN2[91]*acadoWorkspace.DyN[1] + acadoWorkspace.QN2[92]*acadoWorkspace.DyN[2] + acadoWorkspace.QN2[93]*acadoWorkspace.DyN[3] + acadoWorkspace.QN2[94]*acadoWorkspace.DyN[4] + acadoWorkspace.QN2[95]*acadoWorkspace.DyN[5] + acadoWorkspace.QN2[96]*acadoWorkspace.DyN[6] + acadoWorkspace.QN2[97]*acadoWorkspace.DyN[7] + acadoWorkspace.QN2[98]*acadoWorkspace.DyN[8] + acadoWorkspace.QN2[99]*acadoWorkspace.DyN[9]; acadoWorkspace.sbar[0] = acadoWorkspace.Dx0[0]; acadoWorkspace.sbar[1] = acadoWorkspace.Dx0[1]; acadoWorkspace.sbar[2] = acadoWorkspace.Dx0[2]; acadoWorkspace.sbar[3] = acadoWorkspace.Dx0[3]; acadoWorkspace.sbar[4] = acadoWorkspace.Dx0[4]; acadoWorkspace.sbar[5] = acadoWorkspace.Dx0[5]; acadoWorkspace.sbar[6] = acadoWorkspace.Dx0[6]; acadoWorkspace.sbar[7] = acadoWorkspace.Dx0[7]; acadoWorkspace.sbar[8] = acadoWorkspace.Dx0[8]; acadoWorkspace.sbar[9] = acadoWorkspace.Dx0[9]; acado_macASbar( acadoWorkspace.evGx, acadoWorkspace.sbar, &(acadoWorkspace.sbar[ 10 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 100 ]), &(acadoWorkspace.sbar[ 10 ]), &(acadoWorkspace.sbar[ 20 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 200 ]), &(acadoWorkspace.sbar[ 20 ]), &(acadoWorkspace.sbar[ 30 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 300 ]), &(acadoWorkspace.sbar[ 30 ]), &(acadoWorkspace.sbar[ 40 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 400 ]), &(acadoWorkspace.sbar[ 40 ]), &(acadoWorkspace.sbar[ 50 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 500 ]), &(acadoWorkspace.sbar[ 50 ]), &(acadoWorkspace.sbar[ 60 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 600 ]), &(acadoWorkspace.sbar[ 60 ]), &(acadoWorkspace.sbar[ 70 ]) 
); acado_macASbar( &(acadoWorkspace.evGx[ 700 ]), &(acadoWorkspace.sbar[ 70 ]), &(acadoWorkspace.sbar[ 80 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 800 ]), &(acadoWorkspace.sbar[ 80 ]), &(acadoWorkspace.sbar[ 90 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 900 ]), &(acadoWorkspace.sbar[ 90 ]), &(acadoWorkspace.sbar[ 100 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1000 ]), &(acadoWorkspace.sbar[ 100 ]), &(acadoWorkspace.sbar[ 110 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1100 ]), &(acadoWorkspace.sbar[ 110 ]), &(acadoWorkspace.sbar[ 120 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1200 ]), &(acadoWorkspace.sbar[ 120 ]), &(acadoWorkspace.sbar[ 130 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1300 ]), &(acadoWorkspace.sbar[ 130 ]), &(acadoWorkspace.sbar[ 140 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1400 ]), &(acadoWorkspace.sbar[ 140 ]), &(acadoWorkspace.sbar[ 150 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1500 ]), &(acadoWorkspace.sbar[ 150 ]), &(acadoWorkspace.sbar[ 160 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1600 ]), &(acadoWorkspace.sbar[ 160 ]), &(acadoWorkspace.sbar[ 170 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1700 ]), &(acadoWorkspace.sbar[ 170 ]), &(acadoWorkspace.sbar[ 180 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1800 ]), &(acadoWorkspace.sbar[ 180 ]), &(acadoWorkspace.sbar[ 190 ]) ); acado_macASbar( &(acadoWorkspace.evGx[ 1900 ]), &(acadoWorkspace.sbar[ 190 ]), &(acadoWorkspace.sbar[ 200 ]) ); acadoWorkspace.w1[0] = + acadoWorkspace.QN1[0]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[1]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[2]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[3]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[4]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[5]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[6]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[7]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[8]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[9]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[200]; 
acadoWorkspace.w1[1] = + acadoWorkspace.QN1[10]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[11]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[12]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[13]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[14]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[15]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[16]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[17]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[18]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[19]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[201]; acadoWorkspace.w1[2] = + acadoWorkspace.QN1[20]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[21]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[22]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[23]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[24]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[25]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[26]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[27]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[28]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[29]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[202]; acadoWorkspace.w1[3] = + acadoWorkspace.QN1[30]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[31]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[32]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[33]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[34]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[35]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[36]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[37]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[38]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[39]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[203]; acadoWorkspace.w1[4] = + acadoWorkspace.QN1[40]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[41]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[42]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[43]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[44]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[45]*acadoWorkspace.sbar[205] + 
acadoWorkspace.QN1[46]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[47]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[48]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[49]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[204]; acadoWorkspace.w1[5] = + acadoWorkspace.QN1[50]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[51]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[52]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[53]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[54]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[55]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[56]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[57]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[58]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[59]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[205]; acadoWorkspace.w1[6] = + acadoWorkspace.QN1[60]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[61]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[62]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[63]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[64]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[65]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[66]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[67]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[68]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[69]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[206]; acadoWorkspace.w1[7] = + acadoWorkspace.QN1[70]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[71]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[72]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[73]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[74]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[75]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[76]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[77]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[78]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[79]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[207]; acadoWorkspace.w1[8] = + acadoWorkspace.QN1[80]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[81]*acadoWorkspace.sbar[201] + 
acadoWorkspace.QN1[82]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[83]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[84]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[85]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[86]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[87]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[88]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[89]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[208]; acadoWorkspace.w1[9] = + acadoWorkspace.QN1[90]*acadoWorkspace.sbar[200] + acadoWorkspace.QN1[91]*acadoWorkspace.sbar[201] + acadoWorkspace.QN1[92]*acadoWorkspace.sbar[202] + acadoWorkspace.QN1[93]*acadoWorkspace.sbar[203] + acadoWorkspace.QN1[94]*acadoWorkspace.sbar[204] + acadoWorkspace.QN1[95]*acadoWorkspace.sbar[205] + acadoWorkspace.QN1[96]*acadoWorkspace.sbar[206] + acadoWorkspace.QN1[97]*acadoWorkspace.sbar[207] + acadoWorkspace.QN1[98]*acadoWorkspace.sbar[208] + acadoWorkspace.QN1[99]*acadoWorkspace.sbar[209] + acadoWorkspace.QDy[209]; acado_macBTw1( &(acadoWorkspace.evGu[ 760 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 76 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1900 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 190 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1900 ]), &(acadoWorkspace.sbar[ 190 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 720 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 72 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1800 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 180 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1800 ]), &(acadoWorkspace.sbar[ 180 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 680 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 68 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1700 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 170 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1700 ]), &(acadoWorkspace.sbar[ 170 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( 
&(acadoWorkspace.evGu[ 640 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 64 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1600 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 160 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1600 ]), &(acadoWorkspace.sbar[ 160 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 600 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 60 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1500 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 150 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1500 ]), &(acadoWorkspace.sbar[ 150 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 560 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 56 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1400 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 140 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1400 ]), &(acadoWorkspace.sbar[ 140 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 520 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 52 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1300 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 130 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1300 ]), &(acadoWorkspace.sbar[ 130 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 480 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 48 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1200 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 120 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1200 ]), &(acadoWorkspace.sbar[ 120 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 440 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 44 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1100 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 110 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1100 ]), &(acadoWorkspace.sbar[ 110 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( 
&(acadoWorkspace.evGu[ 400 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 40 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 1000 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 100 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 1000 ]), &(acadoWorkspace.sbar[ 100 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 360 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 36 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 900 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 90 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 900 ]), &(acadoWorkspace.sbar[ 90 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 320 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 32 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 800 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 80 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 800 ]), &(acadoWorkspace.sbar[ 80 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 280 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 28 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 700 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 70 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 700 ]), &(acadoWorkspace.sbar[ 70 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 240 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 24 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 600 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 60 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 600 ]), &(acadoWorkspace.sbar[ 60 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 200 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 20 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 500 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 50 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 500 ]), &(acadoWorkspace.sbar[ 50 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( 
&(acadoWorkspace.evGu[ 160 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 16 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 400 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 40 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 400 ]), &(acadoWorkspace.sbar[ 40 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 120 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 12 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 300 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 30 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 300 ]), &(acadoWorkspace.sbar[ 30 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 80 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 8 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 200 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 20 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 200 ]), &(acadoWorkspace.sbar[ 20 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( &(acadoWorkspace.evGu[ 40 ]), acadoWorkspace.w1, &(acadoWorkspace.g[ 4 ]) ); acado_macATw1QDy( &(acadoWorkspace.evGx[ 100 ]), acadoWorkspace.w1, &(acadoWorkspace.QDy[ 10 ]), acadoWorkspace.w2 ); acado_macQSbarW2( &(acadoWorkspace.Q1[ 100 ]), &(acadoWorkspace.sbar[ 10 ]), acadoWorkspace.w2, acadoWorkspace.w1 ); acado_macBTw1( acadoWorkspace.evGu, acadoWorkspace.w1, acadoWorkspace.g ); acadoWorkspace.lb[0] = acadoVariables.lbValues[0] - acadoVariables.u[0]; acadoWorkspace.lb[1] = acadoVariables.lbValues[1] - acadoVariables.u[1]; acadoWorkspace.lb[2] = acadoVariables.lbValues[2] - acadoVariables.u[2]; acadoWorkspace.lb[3] = acadoVariables.lbValues[3] - acadoVariables.u[3]; acadoWorkspace.lb[4] = acadoVariables.lbValues[4] - acadoVariables.u[4]; acadoWorkspace.lb[5] = acadoVariables.lbValues[5] - acadoVariables.u[5]; acadoWorkspace.lb[6] = acadoVariables.lbValues[6] - acadoVariables.u[6]; acadoWorkspace.lb[7] = acadoVariables.lbValues[7] - acadoVariables.u[7]; acadoWorkspace.lb[8] = 
acadoVariables.lbValues[8] - acadoVariables.u[8]; acadoWorkspace.lb[9] = acadoVariables.lbValues[9] - acadoVariables.u[9]; acadoWorkspace.lb[10] = acadoVariables.lbValues[10] - acadoVariables.u[10]; acadoWorkspace.lb[11] = acadoVariables.lbValues[11] - acadoVariables.u[11]; acadoWorkspace.lb[12] = acadoVariables.lbValues[12] - acadoVariables.u[12]; acadoWorkspace.lb[13] = acadoVariables.lbValues[13] - acadoVariables.u[13]; acadoWorkspace.lb[14] = acadoVariables.lbValues[14] - acadoVariables.u[14]; acadoWorkspace.lb[15] = acadoVariables.lbValues[15] - acadoVariables.u[15]; acadoWorkspace.lb[16] = acadoVariables.lbValues[16] - acadoVariables.u[16]; acadoWorkspace.lb[17] = acadoVariables.lbValues[17] - acadoVariables.u[17]; acadoWorkspace.lb[18] = acadoVariables.lbValues[18] - acadoVariables.u[18]; acadoWorkspace.lb[19] = acadoVariables.lbValues[19] - acadoVariables.u[19]; acadoWorkspace.lb[20] = acadoVariables.lbValues[20] - acadoVariables.u[20]; acadoWorkspace.lb[21] = acadoVariables.lbValues[21] - acadoVariables.u[21]; acadoWorkspace.lb[22] = acadoVariables.lbValues[22] - acadoVariables.u[22]; acadoWorkspace.lb[23] = acadoVariables.lbValues[23] - acadoVariables.u[23]; acadoWorkspace.lb[24] = acadoVariables.lbValues[24] - acadoVariables.u[24]; acadoWorkspace.lb[25] = acadoVariables.lbValues[25] - acadoVariables.u[25]; acadoWorkspace.lb[26] = acadoVariables.lbValues[26] - acadoVariables.u[26]; acadoWorkspace.lb[27] = acadoVariables.lbValues[27] - acadoVariables.u[27]; acadoWorkspace.lb[28] = acadoVariables.lbValues[28] - acadoVariables.u[28]; acadoWorkspace.lb[29] = acadoVariables.lbValues[29] - acadoVariables.u[29]; acadoWorkspace.lb[30] = acadoVariables.lbValues[30] - acadoVariables.u[30]; acadoWorkspace.lb[31] = acadoVariables.lbValues[31] - acadoVariables.u[31]; acadoWorkspace.lb[32] = acadoVariables.lbValues[32] - acadoVariables.u[32]; acadoWorkspace.lb[33] = acadoVariables.lbValues[33] - acadoVariables.u[33]; acadoWorkspace.lb[34] = acadoVariables.lbValues[34] 
- acadoVariables.u[34]; acadoWorkspace.lb[35] = acadoVariables.lbValues[35] - acadoVariables.u[35]; acadoWorkspace.lb[36] = acadoVariables.lbValues[36] - acadoVariables.u[36]; acadoWorkspace.lb[37] = acadoVariables.lbValues[37] - acadoVariables.u[37]; acadoWorkspace.lb[38] = acadoVariables.lbValues[38] - acadoVariables.u[38]; acadoWorkspace.lb[39] = acadoVariables.lbValues[39] - acadoVariables.u[39]; acadoWorkspace.lb[40] = acadoVariables.lbValues[40] - acadoVariables.u[40]; acadoWorkspace.lb[41] = acadoVariables.lbValues[41] - acadoVariables.u[41]; acadoWorkspace.lb[42] = acadoVariables.lbValues[42] - acadoVariables.u[42]; acadoWorkspace.lb[43] = acadoVariables.lbValues[43] - acadoVariables.u[43]; acadoWorkspace.lb[44] = acadoVariables.lbValues[44] - acadoVariables.u[44]; acadoWorkspace.lb[45] = acadoVariables.lbValues[45] - acadoVariables.u[45]; acadoWorkspace.lb[46] = acadoVariables.lbValues[46] - acadoVariables.u[46]; acadoWorkspace.lb[47] = acadoVariables.lbValues[47] - acadoVariables.u[47]; acadoWorkspace.lb[48] = acadoVariables.lbValues[48] - acadoVariables.u[48]; acadoWorkspace.lb[49] = acadoVariables.lbValues[49] - acadoVariables.u[49]; acadoWorkspace.lb[50] = acadoVariables.lbValues[50] - acadoVariables.u[50]; acadoWorkspace.lb[51] = acadoVariables.lbValues[51] - acadoVariables.u[51]; acadoWorkspace.lb[52] = acadoVariables.lbValues[52] - acadoVariables.u[52]; acadoWorkspace.lb[53] = acadoVariables.lbValues[53] - acadoVariables.u[53]; acadoWorkspace.lb[54] = acadoVariables.lbValues[54] - acadoVariables.u[54]; acadoWorkspace.lb[55] = acadoVariables.lbValues[55] - acadoVariables.u[55]; acadoWorkspace.lb[56] = acadoVariables.lbValues[56] - acadoVariables.u[56]; acadoWorkspace.lb[57] = acadoVariables.lbValues[57] - acadoVariables.u[57]; acadoWorkspace.lb[58] = acadoVariables.lbValues[58] - acadoVariables.u[58]; acadoWorkspace.lb[59] = acadoVariables.lbValues[59] - acadoVariables.u[59]; acadoWorkspace.lb[60] = acadoVariables.lbValues[60] - acadoVariables.u[60]; 
acadoWorkspace.lb[61] = acadoVariables.lbValues[61] - acadoVariables.u[61]; acadoWorkspace.lb[62] = acadoVariables.lbValues[62] - acadoVariables.u[62]; acadoWorkspace.lb[63] = acadoVariables.lbValues[63] - acadoVariables.u[63]; acadoWorkspace.lb[64] = acadoVariables.lbValues[64] - acadoVariables.u[64]; acadoWorkspace.lb[65] = acadoVariables.lbValues[65] - acadoVariables.u[65]; acadoWorkspace.lb[66] = acadoVariables.lbValues[66] - acadoVariables.u[66]; acadoWorkspace.lb[67] = acadoVariables.lbValues[67] - acadoVariables.u[67]; acadoWorkspace.lb[68] = acadoVariables.lbValues[68] - acadoVariables.u[68]; acadoWorkspace.lb[69] = acadoVariables.lbValues[69] - acadoVariables.u[69]; acadoWorkspace.lb[70] = acadoVariables.lbValues[70] - acadoVariables.u[70]; acadoWorkspace.lb[71] = acadoVariables.lbValues[71] - acadoVariables.u[71]; acadoWorkspace.lb[72] = acadoVariables.lbValues[72] - acadoVariables.u[72]; acadoWorkspace.lb[73] = acadoVariables.lbValues[73] - acadoVariables.u[73]; acadoWorkspace.lb[74] = acadoVariables.lbValues[74] - acadoVariables.u[74]; acadoWorkspace.lb[75] = acadoVariables.lbValues[75] - acadoVariables.u[75]; acadoWorkspace.lb[76] = acadoVariables.lbValues[76] - acadoVariables.u[76]; acadoWorkspace.lb[77] = acadoVariables.lbValues[77] - acadoVariables.u[77]; acadoWorkspace.lb[78] = acadoVariables.lbValues[78] - acadoVariables.u[78]; acadoWorkspace.lb[79] = acadoVariables.lbValues[79] - acadoVariables.u[79]; acadoWorkspace.ub[0] = acadoVariables.ubValues[0] - acadoVariables.u[0]; acadoWorkspace.ub[1] = acadoVariables.ubValues[1] - acadoVariables.u[1]; acadoWorkspace.ub[2] = acadoVariables.ubValues[2] - acadoVariables.u[2]; acadoWorkspace.ub[3] = acadoVariables.ubValues[3] - acadoVariables.u[3]; acadoWorkspace.ub[4] = acadoVariables.ubValues[4] - acadoVariables.u[4]; acadoWorkspace.ub[5] = acadoVariables.ubValues[5] - acadoVariables.u[5]; acadoWorkspace.ub[6] = acadoVariables.ubValues[6] - acadoVariables.u[6]; acadoWorkspace.ub[7] = 
acadoVariables.ubValues[7] - acadoVariables.u[7]; acadoWorkspace.ub[8] = acadoVariables.ubValues[8] - acadoVariables.u[8]; acadoWorkspace.ub[9] = acadoVariables.ubValues[9] - acadoVariables.u[9]; acadoWorkspace.ub[10] = acadoVariables.ubValues[10] - acadoVariables.u[10]; acadoWorkspace.ub[11] = acadoVariables.ubValues[11] - acadoVariables.u[11]; acadoWorkspace.ub[12] = acadoVariables.ubValues[12] - acadoVariables.u[12]; acadoWorkspace.ub[13] = acadoVariables.ubValues[13] - acadoVariables.u[13]; acadoWorkspace.ub[14] = acadoVariables.ubValues[14] - acadoVariables.u[14]; acadoWorkspace.ub[15] = acadoVariables.ubValues[15] - acadoVariables.u[15]; acadoWorkspace.ub[16] = acadoVariables.ubValues[16] - acadoVariables.u[16]; acadoWorkspace.ub[17] = acadoVariables.ubValues[17] - acadoVariables.u[17]; acadoWorkspace.ub[18] = acadoVariables.ubValues[18] - acadoVariables.u[18]; acadoWorkspace.ub[19] = acadoVariables.ubValues[19] - acadoVariables.u[19]; acadoWorkspace.ub[20] = acadoVariables.ubValues[20] - acadoVariables.u[20]; acadoWorkspace.ub[21] = acadoVariables.ubValues[21] - acadoVariables.u[21]; acadoWorkspace.ub[22] = acadoVariables.ubValues[22] - acadoVariables.u[22]; acadoWorkspace.ub[23] = acadoVariables.ubValues[23] - acadoVariables.u[23]; acadoWorkspace.ub[24] = acadoVariables.ubValues[24] - acadoVariables.u[24]; acadoWorkspace.ub[25] = acadoVariables.ubValues[25] - acadoVariables.u[25]; acadoWorkspace.ub[26] = acadoVariables.ubValues[26] - acadoVariables.u[26]; acadoWorkspace.ub[27] = acadoVariables.ubValues[27] - acadoVariables.u[27]; acadoWorkspace.ub[28] = acadoVariables.ubValues[28] - acadoVariables.u[28]; acadoWorkspace.ub[29] = acadoVariables.ubValues[29] - acadoVariables.u[29]; acadoWorkspace.ub[30] = acadoVariables.ubValues[30] - acadoVariables.u[30]; acadoWorkspace.ub[31] = acadoVariables.ubValues[31] - acadoVariables.u[31]; acadoWorkspace.ub[32] = acadoVariables.ubValues[32] - acadoVariables.u[32]; acadoWorkspace.ub[33] = acadoVariables.ubValues[33] - 
acadoVariables.u[33]; acadoWorkspace.ub[34] = acadoVariables.ubValues[34] - acadoVariables.u[34]; acadoWorkspace.ub[35] = acadoVariables.ubValues[35] - acadoVariables.u[35]; acadoWorkspace.ub[36] = acadoVariables.ubValues[36] - acadoVariables.u[36]; acadoWorkspace.ub[37] = acadoVariables.ubValues[37] - acadoVariables.u[37]; acadoWorkspace.ub[38] = acadoVariables.ubValues[38] - acadoVariables.u[38]; acadoWorkspace.ub[39] = acadoVariables.ubValues[39] - acadoVariables.u[39]; acadoWorkspace.ub[40] = acadoVariables.ubValues[40] - acadoVariables.u[40]; acadoWorkspace.ub[41] = acadoVariables.ubValues[41] - acadoVariables.u[41]; acadoWorkspace.ub[42] = acadoVariables.ubValues[42] - acadoVariables.u[42]; acadoWorkspace.ub[43] = acadoVariables.ubValues[43] - acadoVariables.u[43]; acadoWorkspace.ub[44] = acadoVariables.ubValues[44] - acadoVariables.u[44]; acadoWorkspace.ub[45] = acadoVariables.ubValues[45] - acadoVariables.u[45]; acadoWorkspace.ub[46] = acadoVariables.ubValues[46] - acadoVariables.u[46]; acadoWorkspace.ub[47] = acadoVariables.ubValues[47] - acadoVariables.u[47]; acadoWorkspace.ub[48] = acadoVariables.ubValues[48] - acadoVariables.u[48]; acadoWorkspace.ub[49] = acadoVariables.ubValues[49] - acadoVariables.u[49]; acadoWorkspace.ub[50] = acadoVariables.ubValues[50] - acadoVariables.u[50]; acadoWorkspace.ub[51] = acadoVariables.ubValues[51] - acadoVariables.u[51]; acadoWorkspace.ub[52] = acadoVariables.ubValues[52] - acadoVariables.u[52]; acadoWorkspace.ub[53] = acadoVariables.ubValues[53] - acadoVariables.u[53]; acadoWorkspace.ub[54] = acadoVariables.ubValues[54] - acadoVariables.u[54]; acadoWorkspace.ub[55] = acadoVariables.ubValues[55] - acadoVariables.u[55]; acadoWorkspace.ub[56] = acadoVariables.ubValues[56] - acadoVariables.u[56]; acadoWorkspace.ub[57] = acadoVariables.ubValues[57] - acadoVariables.u[57]; acadoWorkspace.ub[58] = acadoVariables.ubValues[58] - acadoVariables.u[58]; acadoWorkspace.ub[59] = acadoVariables.ubValues[59] - acadoVariables.u[59]; 
acadoWorkspace.ub[60] = acadoVariables.ubValues[60] - acadoVariables.u[60]; acadoWorkspace.ub[61] = acadoVariables.ubValues[61] - acadoVariables.u[61]; acadoWorkspace.ub[62] = acadoVariables.ubValues[62] - acadoVariables.u[62]; acadoWorkspace.ub[63] = acadoVariables.ubValues[63] - acadoVariables.u[63]; acadoWorkspace.ub[64] = acadoVariables.ubValues[64] - acadoVariables.u[64]; acadoWorkspace.ub[65] = acadoVariables.ubValues[65] - acadoVariables.u[65]; acadoWorkspace.ub[66] = acadoVariables.ubValues[66] - acadoVariables.u[66]; acadoWorkspace.ub[67] = acadoVariables.ubValues[67] - acadoVariables.u[67]; acadoWorkspace.ub[68] = acadoVariables.ubValues[68] - acadoVariables.u[68]; acadoWorkspace.ub[69] = acadoVariables.ubValues[69] - acadoVariables.u[69]; acadoWorkspace.ub[70] = acadoVariables.ubValues[70] - acadoVariables.u[70]; acadoWorkspace.ub[71] = acadoVariables.ubValues[71] - acadoVariables.u[71]; acadoWorkspace.ub[72] = acadoVariables.ubValues[72] - acadoVariables.u[72]; acadoWorkspace.ub[73] = acadoVariables.ubValues[73] - acadoVariables.u[73]; acadoWorkspace.ub[74] = acadoVariables.ubValues[74] - acadoVariables.u[74]; acadoWorkspace.ub[75] = acadoVariables.ubValues[75] - acadoVariables.u[75]; acadoWorkspace.ub[76] = acadoVariables.ubValues[76] - acadoVariables.u[76]; acadoWorkspace.ub[77] = acadoVariables.ubValues[77] - acadoVariables.u[77]; acadoWorkspace.ub[78] = acadoVariables.ubValues[78] - acadoVariables.u[78]; acadoWorkspace.ub[79] = acadoVariables.ubValues[79] - acadoVariables.u[79]; acado_macHxd( acadoWorkspace.evHx, acadoWorkspace.sbar, acadoWorkspace.lbA, acadoWorkspace.ubA ); acado_macHxd( &(acadoWorkspace.evHx[ 10 ]), &(acadoWorkspace.sbar[ 10 ]), &(acadoWorkspace.lbA[ 1 ]), &(acadoWorkspace.ubA[ 1 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 20 ]), &(acadoWorkspace.sbar[ 20 ]), &(acadoWorkspace.lbA[ 2 ]), &(acadoWorkspace.ubA[ 2 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 30 ]), &(acadoWorkspace.sbar[ 30 ]), &(acadoWorkspace.lbA[ 3 ]), 
&(acadoWorkspace.ubA[ 3 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 40 ]), &(acadoWorkspace.sbar[ 40 ]), &(acadoWorkspace.lbA[ 4 ]), &(acadoWorkspace.ubA[ 4 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 50 ]), &(acadoWorkspace.sbar[ 50 ]), &(acadoWorkspace.lbA[ 5 ]), &(acadoWorkspace.ubA[ 5 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 60 ]), &(acadoWorkspace.sbar[ 60 ]), &(acadoWorkspace.lbA[ 6 ]), &(acadoWorkspace.ubA[ 6 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 70 ]), &(acadoWorkspace.sbar[ 70 ]), &(acadoWorkspace.lbA[ 7 ]), &(acadoWorkspace.ubA[ 7 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 80 ]), &(acadoWorkspace.sbar[ 80 ]), &(acadoWorkspace.lbA[ 8 ]), &(acadoWorkspace.ubA[ 8 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 90 ]), &(acadoWorkspace.sbar[ 90 ]), &(acadoWorkspace.lbA[ 9 ]), &(acadoWorkspace.ubA[ 9 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 100 ]), &(acadoWorkspace.sbar[ 100 ]), &(acadoWorkspace.lbA[ 10 ]), &(acadoWorkspace.ubA[ 10 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 110 ]), &(acadoWorkspace.sbar[ 110 ]), &(acadoWorkspace.lbA[ 11 ]), &(acadoWorkspace.ubA[ 11 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 120 ]), &(acadoWorkspace.sbar[ 120 ]), &(acadoWorkspace.lbA[ 12 ]), &(acadoWorkspace.ubA[ 12 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 130 ]), &(acadoWorkspace.sbar[ 130 ]), &(acadoWorkspace.lbA[ 13 ]), &(acadoWorkspace.ubA[ 13 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 140 ]), &(acadoWorkspace.sbar[ 140 ]), &(acadoWorkspace.lbA[ 14 ]), &(acadoWorkspace.ubA[ 14 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 150 ]), &(acadoWorkspace.sbar[ 150 ]), &(acadoWorkspace.lbA[ 15 ]), &(acadoWorkspace.ubA[ 15 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 160 ]), &(acadoWorkspace.sbar[ 160 ]), &(acadoWorkspace.lbA[ 16 ]), &(acadoWorkspace.ubA[ 16 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 170 ]), &(acadoWorkspace.sbar[ 170 ]), &(acadoWorkspace.lbA[ 17 ]), &(acadoWorkspace.ubA[ 17 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 180 ]), &(acadoWorkspace.sbar[ 180 ]), 
&(acadoWorkspace.lbA[ 18 ]), &(acadoWorkspace.ubA[ 18 ]) ); acado_macHxd( &(acadoWorkspace.evHx[ 190 ]), &(acadoWorkspace.sbar[ 190 ]), &(acadoWorkspace.lbA[ 19 ]), &(acadoWorkspace.ubA[ 19 ]) ); } void acado_expand( ) { int lRun1; acadoVariables.u[0] += acadoWorkspace.x[0]; acadoVariables.u[1] += acadoWorkspace.x[1]; acadoVariables.u[2] += acadoWorkspace.x[2]; acadoVariables.u[3] += acadoWorkspace.x[3]; acadoVariables.u[4] += acadoWorkspace.x[4]; acadoVariables.u[5] += acadoWorkspace.x[5]; acadoVariables.u[6] += acadoWorkspace.x[6]; acadoVariables.u[7] += acadoWorkspace.x[7]; acadoVariables.u[8] += acadoWorkspace.x[8]; acadoVariables.u[9] += acadoWorkspace.x[9]; acadoVariables.u[10] += acadoWorkspace.x[10]; acadoVariables.u[11] += acadoWorkspace.x[11]; acadoVariables.u[12] += acadoWorkspace.x[12]; acadoVariables.u[13] += acadoWorkspace.x[13]; acadoVariables.u[14] += acadoWorkspace.x[14]; acadoVariables.u[15] += acadoWorkspace.x[15]; acadoVariables.u[16] += acadoWorkspace.x[16]; acadoVariables.u[17] += acadoWorkspace.x[17]; acadoVariables.u[18] += acadoWorkspace.x[18]; acadoVariables.u[19] += acadoWorkspace.x[19]; acadoVariables.u[20] += acadoWorkspace.x[20]; acadoVariables.u[21] += acadoWorkspace.x[21]; acadoVariables.u[22] += acadoWorkspace.x[22]; acadoVariables.u[23] += acadoWorkspace.x[23]; acadoVariables.u[24] += acadoWorkspace.x[24]; acadoVariables.u[25] += acadoWorkspace.x[25]; acadoVariables.u[26] += acadoWorkspace.x[26]; acadoVariables.u[27] += acadoWorkspace.x[27]; acadoVariables.u[28] += acadoWorkspace.x[28]; acadoVariables.u[29] += acadoWorkspace.x[29]; acadoVariables.u[30] += acadoWorkspace.x[30]; acadoVariables.u[31] += acadoWorkspace.x[31]; acadoVariables.u[32] += acadoWorkspace.x[32]; acadoVariables.u[33] += acadoWorkspace.x[33]; acadoVariables.u[34] += acadoWorkspace.x[34]; acadoVariables.u[35] += acadoWorkspace.x[35]; acadoVariables.u[36] += acadoWorkspace.x[36]; acadoVariables.u[37] += acadoWorkspace.x[37]; acadoVariables.u[38] += 
acadoWorkspace.x[38]; acadoVariables.u[39] += acadoWorkspace.x[39]; acadoVariables.u[40] += acadoWorkspace.x[40]; acadoVariables.u[41] += acadoWorkspace.x[41]; acadoVariables.u[42] += acadoWorkspace.x[42]; acadoVariables.u[43] += acadoWorkspace.x[43]; acadoVariables.u[44] += acadoWorkspace.x[44]; acadoVariables.u[45] += acadoWorkspace.x[45]; acadoVariables.u[46] += acadoWorkspace.x[46]; acadoVariables.u[47] += acadoWorkspace.x[47]; acadoVariables.u[48] += acadoWorkspace.x[48]; acadoVariables.u[49] += acadoWorkspace.x[49]; acadoVariables.u[50] += acadoWorkspace.x[50]; acadoVariables.u[51] += acadoWorkspace.x[51]; acadoVariables.u[52] += acadoWorkspace.x[52]; acadoVariables.u[53] += acadoWorkspace.x[53]; acadoVariables.u[54] += acadoWorkspace.x[54]; acadoVariables.u[55] += acadoWorkspace.x[55]; acadoVariables.u[56] += acadoWorkspace.x[56]; acadoVariables.u[57] += acadoWorkspace.x[57]; acadoVariables.u[58] += acadoWorkspace.x[58]; acadoVariables.u[59] += acadoWorkspace.x[59]; acadoVariables.u[60] += acadoWorkspace.x[60]; acadoVariables.u[61] += acadoWorkspace.x[61]; acadoVariables.u[62] += acadoWorkspace.x[62]; acadoVariables.u[63] += acadoWorkspace.x[63]; acadoVariables.u[64] += acadoWorkspace.x[64]; acadoVariables.u[65] += acadoWorkspace.x[65]; acadoVariables.u[66] += acadoWorkspace.x[66]; acadoVariables.u[67] += acadoWorkspace.x[67]; acadoVariables.u[68] += acadoWorkspace.x[68]; acadoVariables.u[69] += acadoWorkspace.x[69]; acadoVariables.u[70] += acadoWorkspace.x[70]; acadoVariables.u[71] += acadoWorkspace.x[71]; acadoVariables.u[72] += acadoWorkspace.x[72]; acadoVariables.u[73] += acadoWorkspace.x[73]; acadoVariables.u[74] += acadoWorkspace.x[74]; acadoVariables.u[75] += acadoWorkspace.x[75]; acadoVariables.u[76] += acadoWorkspace.x[76]; acadoVariables.u[77] += acadoWorkspace.x[77]; acadoVariables.u[78] += acadoWorkspace.x[78]; acadoVariables.u[79] += acadoWorkspace.x[79]; acadoWorkspace.sbar[0] = acadoWorkspace.Dx0[0]; acadoWorkspace.sbar[1] = 
acadoWorkspace.Dx0[1]; acadoWorkspace.sbar[2] = acadoWorkspace.Dx0[2]; acadoWorkspace.sbar[3] = acadoWorkspace.Dx0[3]; acadoWorkspace.sbar[4] = acadoWorkspace.Dx0[4]; acadoWorkspace.sbar[5] = acadoWorkspace.Dx0[5]; acadoWorkspace.sbar[6] = acadoWorkspace.Dx0[6]; acadoWorkspace.sbar[7] = acadoWorkspace.Dx0[7]; acadoWorkspace.sbar[8] = acadoWorkspace.Dx0[8]; acadoWorkspace.sbar[9] = acadoWorkspace.Dx0[9]; for (lRun1 = 0; lRun1 < 200; ++lRun1) acadoWorkspace.sbar[lRun1 + 10] = acadoWorkspace.d[lRun1]; acado_expansionStep( acadoWorkspace.evGx, acadoWorkspace.evGu, acadoWorkspace.x, acadoWorkspace.sbar, &(acadoWorkspace.sbar[ 10 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 100 ]), &(acadoWorkspace.evGu[ 40 ]), &(acadoWorkspace.x[ 4 ]), &(acadoWorkspace.sbar[ 10 ]), &(acadoWorkspace.sbar[ 20 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 200 ]), &(acadoWorkspace.evGu[ 80 ]), &(acadoWorkspace.x[ 8 ]), &(acadoWorkspace.sbar[ 20 ]), &(acadoWorkspace.sbar[ 30 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 300 ]), &(acadoWorkspace.evGu[ 120 ]), &(acadoWorkspace.x[ 12 ]), &(acadoWorkspace.sbar[ 30 ]), &(acadoWorkspace.sbar[ 40 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 400 ]), &(acadoWorkspace.evGu[ 160 ]), &(acadoWorkspace.x[ 16 ]), &(acadoWorkspace.sbar[ 40 ]), &(acadoWorkspace.sbar[ 50 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 500 ]), &(acadoWorkspace.evGu[ 200 ]), &(acadoWorkspace.x[ 20 ]), &(acadoWorkspace.sbar[ 50 ]), &(acadoWorkspace.sbar[ 60 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 600 ]), &(acadoWorkspace.evGu[ 240 ]), &(acadoWorkspace.x[ 24 ]), &(acadoWorkspace.sbar[ 60 ]), &(acadoWorkspace.sbar[ 70 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 700 ]), &(acadoWorkspace.evGu[ 280 ]), &(acadoWorkspace.x[ 28 ]), &(acadoWorkspace.sbar[ 70 ]), &(acadoWorkspace.sbar[ 80 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 800 ]), &(acadoWorkspace.evGu[ 320 ]), &(acadoWorkspace.x[ 32 ]), &(acadoWorkspace.sbar[ 80 ]), 
&(acadoWorkspace.sbar[ 90 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 900 ]), &(acadoWorkspace.evGu[ 360 ]), &(acadoWorkspace.x[ 36 ]), &(acadoWorkspace.sbar[ 90 ]), &(acadoWorkspace.sbar[ 100 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1000 ]), &(acadoWorkspace.evGu[ 400 ]), &(acadoWorkspace.x[ 40 ]), &(acadoWorkspace.sbar[ 100 ]), &(acadoWorkspace.sbar[ 110 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1100 ]), &(acadoWorkspace.evGu[ 440 ]), &(acadoWorkspace.x[ 44 ]), &(acadoWorkspace.sbar[ 110 ]), &(acadoWorkspace.sbar[ 120 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1200 ]), &(acadoWorkspace.evGu[ 480 ]), &(acadoWorkspace.x[ 48 ]), &(acadoWorkspace.sbar[ 120 ]), &(acadoWorkspace.sbar[ 130 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1300 ]), &(acadoWorkspace.evGu[ 520 ]), &(acadoWorkspace.x[ 52 ]), &(acadoWorkspace.sbar[ 130 ]), &(acadoWorkspace.sbar[ 140 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1400 ]), &(acadoWorkspace.evGu[ 560 ]), &(acadoWorkspace.x[ 56 ]), &(acadoWorkspace.sbar[ 140 ]), &(acadoWorkspace.sbar[ 150 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1500 ]), &(acadoWorkspace.evGu[ 600 ]), &(acadoWorkspace.x[ 60 ]), &(acadoWorkspace.sbar[ 150 ]), &(acadoWorkspace.sbar[ 160 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1600 ]), &(acadoWorkspace.evGu[ 640 ]), &(acadoWorkspace.x[ 64 ]), &(acadoWorkspace.sbar[ 160 ]), &(acadoWorkspace.sbar[ 170 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1700 ]), &(acadoWorkspace.evGu[ 680 ]), &(acadoWorkspace.x[ 68 ]), &(acadoWorkspace.sbar[ 170 ]), &(acadoWorkspace.sbar[ 180 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1800 ]), &(acadoWorkspace.evGu[ 720 ]), &(acadoWorkspace.x[ 72 ]), &(acadoWorkspace.sbar[ 180 ]), &(acadoWorkspace.sbar[ 190 ]) ); acado_expansionStep( &(acadoWorkspace.evGx[ 1900 ]), &(acadoWorkspace.evGu[ 760 ]), &(acadoWorkspace.x[ 76 ]), &(acadoWorkspace.sbar[ 190 ]), &(acadoWorkspace.sbar[ 200 ]) ); for (lRun1 = 0; lRun1 < 210; ++lRun1) 
acadoVariables.x[lRun1] += acadoWorkspace.sbar[lRun1]; } int acado_preparationStep( ) { int ret; ret = acado_modelSimulation(); acado_evaluateObjective( ); acado_condensePrep( ); return ret; } int acado_feedbackStep( ) { int tmp; acado_condenseFdb( ); tmp = acado_solve( ); acado_expand( ); return tmp; } int acado_initializeSolver( ) { int ret; /* This is a function which must be called once before any other function call! */ ret = 0; memset(&acadoWorkspace, 0, sizeof( acadoWorkspace )); acadoVariables.lbValues[0] = 1.9620000000000002e+00; acadoVariables.lbValues[1] = -2.5000000000000000e+00; acadoVariables.lbValues[2] = -2.5000000000000000e+00; acadoVariables.lbValues[3] = -1.0000000000000000e-03; acadoVariables.lbValues[4] = 1.9620000000000002e+00; acadoVariables.lbValues[5] = -2.5000000000000000e+00; acadoVariables.lbValues[6] = -2.5000000000000000e+00; acadoVariables.lbValues[7] = -1.0000000000000000e-03; acadoVariables.lbValues[8] = 1.9620000000000002e+00; acadoVariables.lbValues[9] = -2.5000000000000000e+00; acadoVariables.lbValues[10] = -2.5000000000000000e+00; acadoVariables.lbValues[11] = -1.0000000000000000e-03; acadoVariables.lbValues[12] = 1.9620000000000002e+00; acadoVariables.lbValues[13] = -2.5000000000000000e+00; acadoVariables.lbValues[14] = -2.5000000000000000e+00; acadoVariables.lbValues[15] = -1.0000000000000000e-03; acadoVariables.lbValues[16] = 1.9620000000000002e+00; acadoVariables.lbValues[17] = -2.5000000000000000e+00; acadoVariables.lbValues[18] = -2.5000000000000000e+00; acadoVariables.lbValues[19] = -1.0000000000000000e-03; acadoVariables.lbValues[20] = 1.9620000000000002e+00; acadoVariables.lbValues[21] = -2.5000000000000000e+00; acadoVariables.lbValues[22] = -2.5000000000000000e+00; acadoVariables.lbValues[23] = -1.0000000000000000e-03; acadoVariables.lbValues[24] = 1.9620000000000002e+00; acadoVariables.lbValues[25] = -2.5000000000000000e+00; acadoVariables.lbValues[26] = -2.5000000000000000e+00; acadoVariables.lbValues[27] = 
-1.0000000000000000e-03; acadoVariables.lbValues[28] = 1.9620000000000002e+00; acadoVariables.lbValues[29] = -2.5000000000000000e+00; acadoVariables.lbValues[30] = -2.5000000000000000e+00; acadoVariables.lbValues[31] = -1.0000000000000000e-03; acadoVariables.lbValues[32] = 1.9620000000000002e+00; acadoVariables.lbValues[33] = -2.5000000000000000e+00; acadoVariables.lbValues[34] = -2.5000000000000000e+00; acadoVariables.lbValues[35] = -1.0000000000000000e-03; acadoVariables.lbValues[36] = 1.9620000000000002e+00; acadoVariables.lbValues[37] = -2.5000000000000000e+00; acadoVariables.lbValues[38] = -2.5000000000000000e+00; acadoVariables.lbValues[39] = -1.0000000000000000e-03; acadoVariables.lbValues[40] = 1.9620000000000002e+00; acadoVariables.lbValues[41] = -2.5000000000000000e+00; acadoVariables.lbValues[42] = -2.5000000000000000e+00; acadoVariables.lbValues[43] = -1.0000000000000000e-03; acadoVariables.lbValues[44] = 1.9620000000000002e+00; acadoVariables.lbValues[45] = -2.5000000000000000e+00; acadoVariables.lbValues[46] = -2.5000000000000000e+00; acadoVariables.lbValues[47] = -1.0000000000000000e-03; acadoVariables.lbValues[48] = 1.9620000000000002e+00; acadoVariables.lbValues[49] = -2.5000000000000000e+00; acadoVariables.lbValues[50] = -2.5000000000000000e+00; acadoVariables.lbValues[51] = -1.0000000000000000e-03; acadoVariables.lbValues[52] = 1.9620000000000002e+00; acadoVariables.lbValues[53] = -2.5000000000000000e+00; acadoVariables.lbValues[54] = -2.5000000000000000e+00; acadoVariables.lbValues[55] = -1.0000000000000000e-03; acadoVariables.lbValues[56] = 1.9620000000000002e+00; acadoVariables.lbValues[57] = -2.5000000000000000e+00; acadoVariables.lbValues[58] = -2.5000000000000000e+00; acadoVariables.lbValues[59] = -1.0000000000000000e-03; acadoVariables.lbValues[60] = 1.9620000000000002e+00; acadoVariables.lbValues[61] = -2.5000000000000000e+00; acadoVariables.lbValues[62] = -2.5000000000000000e+00; acadoVariables.lbValues[63] = -1.0000000000000000e-03; 
acadoVariables.lbValues[64] = 1.9620000000000002e+00; acadoVariables.lbValues[65] = -2.5000000000000000e+00; acadoVariables.lbValues[66] = -2.5000000000000000e+00; acadoVariables.lbValues[67] = -1.0000000000000000e-03; acadoVariables.lbValues[68] = 1.9620000000000002e+00; acadoVariables.lbValues[69] = -2.5000000000000000e+00; acadoVariables.lbValues[70] = -2.5000000000000000e+00; acadoVariables.lbValues[71] = -1.0000000000000000e-03; acadoVariables.lbValues[72] = 1.9620000000000002e+00; acadoVariables.lbValues[73] = -2.5000000000000000e+00; acadoVariables.lbValues[74] = -2.5000000000000000e+00; acadoVariables.lbValues[75] = -1.0000000000000000e-03; acadoVariables.lbValues[76] = 1.9620000000000002e+00; acadoVariables.lbValues[77] = -2.5000000000000000e+00; acadoVariables.lbValues[78] = -2.5000000000000000e+00; acadoVariables.lbValues[79] = -1.0000000000000000e-03; acadoVariables.ubValues[0] = 1.9620000000000001e+01; acadoVariables.ubValues[1] = 2.5000000000000000e+00; acadoVariables.ubValues[2] = 2.5000000000000000e+00; acadoVariables.ubValues[3] = 1.0000000000000000e-03; acadoVariables.ubValues[4] = 1.9620000000000001e+01; acadoVariables.ubValues[5] = 2.5000000000000000e+00; acadoVariables.ubValues[6] = 2.5000000000000000e+00; acadoVariables.ubValues[7] = 1.0000000000000000e-03; acadoVariables.ubValues[8] = 1.9620000000000001e+01; acadoVariables.ubValues[9] = 2.5000000000000000e+00; acadoVariables.ubValues[10] = 2.5000000000000000e+00; acadoVariables.ubValues[11] = 1.0000000000000000e-03; acadoVariables.ubValues[12] = 1.9620000000000001e+01; acadoVariables.ubValues[13] = 2.5000000000000000e+00; acadoVariables.ubValues[14] = 2.5000000000000000e+00; acadoVariables.ubValues[15] = 1.0000000000000000e-03; acadoVariables.ubValues[16] = 1.9620000000000001e+01; acadoVariables.ubValues[17] = 2.5000000000000000e+00; acadoVariables.ubValues[18] = 2.5000000000000000e+00; acadoVariables.ubValues[19] = 1.0000000000000000e-03; acadoVariables.ubValues[20] = 1.9620000000000001e+01; 
acadoVariables.ubValues[21] = 2.5000000000000000e+00; acadoVariables.ubValues[22] = 2.5000000000000000e+00; acadoVariables.ubValues[23] = 1.0000000000000000e-03; acadoVariables.ubValues[24] = 1.9620000000000001e+01; acadoVariables.ubValues[25] = 2.5000000000000000e+00; acadoVariables.ubValues[26] = 2.5000000000000000e+00; acadoVariables.ubValues[27] = 1.0000000000000000e-03; acadoVariables.ubValues[28] = 1.9620000000000001e+01; acadoVariables.ubValues[29] = 2.5000000000000000e+00; acadoVariables.ubValues[30] = 2.5000000000000000e+00; acadoVariables.ubValues[31] = 1.0000000000000000e-03; acadoVariables.ubValues[32] = 1.9620000000000001e+01; acadoVariables.ubValues[33] = 2.5000000000000000e+00; acadoVariables.ubValues[34] = 2.5000000000000000e+00; acadoVariables.ubValues[35] = 1.0000000000000000e-03; acadoVariables.ubValues[36] = 1.9620000000000001e+01; acadoVariables.ubValues[37] = 2.5000000000000000e+00; acadoVariables.ubValues[38] = 2.5000000000000000e+00; acadoVariables.ubValues[39] = 1.0000000000000000e-03; acadoVariables.ubValues[40] = 1.9620000000000001e+01; acadoVariables.ubValues[41] = 2.5000000000000000e+00; acadoVariables.ubValues[42] = 2.5000000000000000e+00; acadoVariables.ubValues[43] = 1.0000000000000000e-03; acadoVariables.ubValues[44] = 1.9620000000000001e+01; acadoVariables.ubValues[45] = 2.5000000000000000e+00; acadoVariables.ubValues[46] = 2.5000000000000000e+00; acadoVariables.ubValues[47] = 1.0000000000000000e-03; acadoVariables.ubValues[48] = 1.9620000000000001e+01; acadoVariables.ubValues[49] = 2.5000000000000000e+00; acadoVariables.ubValues[50] = 2.5000000000000000e+00; acadoVariables.ubValues[51] = 1.0000000000000000e-03; acadoVariables.ubValues[52] = 1.9620000000000001e+01; acadoVariables.ubValues[53] = 2.5000000000000000e+00; acadoVariables.ubValues[54] = 2.5000000000000000e+00; acadoVariables.ubValues[55] = 1.0000000000000000e-03; acadoVariables.ubValues[56] = 1.9620000000000001e+01; acadoVariables.ubValues[57] = 2.5000000000000000e+00; 
acadoVariables.ubValues[58] = 2.5000000000000000e+00; acadoVariables.ubValues[59] = 1.0000000000000000e-03; acadoVariables.ubValues[60] = 1.9620000000000001e+01; acadoVariables.ubValues[61] = 2.5000000000000000e+00; acadoVariables.ubValues[62] = 2.5000000000000000e+00; acadoVariables.ubValues[63] = 1.0000000000000000e-03; acadoVariables.ubValues[64] = 1.9620000000000001e+01; acadoVariables.ubValues[65] = 2.5000000000000000e+00; acadoVariables.ubValues[66] = 2.5000000000000000e+00; acadoVariables.ubValues[67] = 1.0000000000000000e-03; acadoVariables.ubValues[68] = 1.9620000000000001e+01; acadoVariables.ubValues[69] = 2.5000000000000000e+00; acadoVariables.ubValues[70] = 2.5000000000000000e+00; acadoVariables.ubValues[71] = 1.0000000000000000e-03; acadoVariables.ubValues[72] = 1.9620000000000001e+01; acadoVariables.ubValues[73] = 2.5000000000000000e+00; acadoVariables.ubValues[74] = 2.5000000000000000e+00; acadoVariables.ubValues[75] = 1.0000000000000000e-03; acadoVariables.ubValues[76] = 1.9620000000000001e+01; acadoVariables.ubValues[77] = 2.5000000000000000e+00; acadoVariables.ubValues[78] = 2.5000000000000000e+00; acadoVariables.ubValues[79] = 1.0000000000000000e-03; acadoVariables.lbAValues[0] = -1.0000000000000000e+12; acadoVariables.lbAValues[1] = -1.0000000000000000e+12; acadoVariables.lbAValues[2] = -1.0000000000000000e+12; acadoVariables.lbAValues[3] = -1.0000000000000000e+12; acadoVariables.lbAValues[4] = -1.0000000000000000e+12; acadoVariables.lbAValues[5] = -1.0000000000000000e+12; acadoVariables.lbAValues[6] = -1.0000000000000000e+12; acadoVariables.lbAValues[7] = -1.0000000000000000e+12; acadoVariables.lbAValues[8] = -1.0000000000000000e+12; acadoVariables.lbAValues[9] = -1.0000000000000000e+12; acadoVariables.lbAValues[10] = -1.0000000000000000e+12; acadoVariables.lbAValues[11] = -1.0000000000000000e+12; acadoVariables.lbAValues[12] = -1.0000000000000000e+12; acadoVariables.lbAValues[13] = -1.0000000000000000e+12; acadoVariables.lbAValues[14] = 
-1.0000000000000000e+12; acadoVariables.lbAValues[15] = -1.0000000000000000e+12; acadoVariables.lbAValues[16] = -1.0000000000000000e+12; acadoVariables.lbAValues[17] = -1.0000000000000000e+12; acadoVariables.lbAValues[18] = -1.0000000000000000e+12; acadoVariables.lbAValues[19] = -1.0000000000000000e+12; acadoVariables.ubAValues[0] = 1.9620000000000001e+01; acadoVariables.ubAValues[1] = 1.9620000000000001e+01; acadoVariables.ubAValues[2] = 1.9620000000000001e+01; acadoVariables.ubAValues[3] = 1.9620000000000001e+01; acadoVariables.ubAValues[4] = 1.9620000000000001e+01; acadoVariables.ubAValues[5] = 1.9620000000000001e+01; acadoVariables.ubAValues[6] = 1.9620000000000001e+01; acadoVariables.ubAValues[7] = 1.9620000000000001e+01; acadoVariables.ubAValues[8] = 1.9620000000000001e+01; acadoVariables.ubAValues[9] = 1.9620000000000001e+01; acadoVariables.ubAValues[10] = 1.9620000000000001e+01; acadoVariables.ubAValues[11] = 1.9620000000000001e+01; acadoVariables.ubAValues[12] = 1.9620000000000001e+01; acadoVariables.ubAValues[13] = 1.9620000000000001e+01; acadoVariables.ubAValues[14] = 1.9620000000000001e+01; acadoVariables.ubAValues[15] = 1.9620000000000001e+01; acadoVariables.ubAValues[16] = 1.9620000000000001e+01; acadoVariables.ubAValues[17] = 1.9620000000000001e+01; acadoVariables.ubAValues[18] = 1.9620000000000001e+01; acadoVariables.ubAValues[19] = 1.9620000000000001e+01; return ret; } void acado_initializeNodesByForwardSimulation( ) { int index; for (index = 0; index < 20; ++index) { state[0] = acadoVariables.x[index * 10]; state[1] = acadoVariables.x[index * 10 + 1]; state[2] = acadoVariables.x[index * 10 + 2]; state[3] = acadoVariables.x[index * 10 + 3]; state[4] = acadoVariables.x[index * 10 + 4]; state[5] = acadoVariables.x[index * 10 + 5]; state[6] = acadoVariables.x[index * 10 + 6]; state[7] = acadoVariables.x[index * 10 + 7]; state[8] = acadoVariables.x[index * 10 + 8]; state[9] = acadoVariables.x[index * 10 + 9]; state[150] = acadoVariables.u[index * 4]; 
state[151] = acadoVariables.u[index * 4 + 1]; state[152] = acadoVariables.u[index * 4 + 2]; state[153] = acadoVariables.u[index * 4 + 3]; state[154] = acadoVariables.od[index * 10]; state[155] = acadoVariables.od[index * 10 + 1]; state[156] = acadoVariables.od[index * 10 + 2]; state[157] = acadoVariables.od[index * 10 + 3]; state[158] = acadoVariables.od[index * 10 + 4]; state[159] = acadoVariables.od[index * 10 + 5]; state[160] = acadoVariables.od[index * 10 + 6]; state[161] = acadoVariables.od[index * 10 + 7]; state[162] = acadoVariables.od[index * 10 + 8]; state[163] = acadoVariables.od[index * 10 + 9]; acado_integrate(state, index == 0); acadoVariables.x[index * 10 + 10] = state[0]; acadoVariables.x[index * 10 + 11] = state[1]; acadoVariables.x[index * 10 + 12] = state[2]; acadoVariables.x[index * 10 + 13] = state[3]; acadoVariables.x[index * 10 + 14] = state[4]; acadoVariables.x[index * 10 + 15] = state[5]; acadoVariables.x[index * 10 + 16] = state[6]; acadoVariables.x[index * 10 + 17] = state[7]; acadoVariables.x[index * 10 + 18] = state[8]; acadoVariables.x[index * 10 + 19] = state[9]; } } void acado_shiftStates( int strategy, real_t* const xEnd, real_t* const uEnd ) { int index; for (index = 0; index < 20; ++index) { acadoVariables.x[index * 10] = acadoVariables.x[index * 10 + 10]; acadoVariables.x[index * 10 + 1] = acadoVariables.x[index * 10 + 11]; acadoVariables.x[index * 10 + 2] = acadoVariables.x[index * 10 + 12]; acadoVariables.x[index * 10 + 3] = acadoVariables.x[index * 10 + 13]; acadoVariables.x[index * 10 + 4] = acadoVariables.x[index * 10 + 14]; acadoVariables.x[index * 10 + 5] = acadoVariables.x[index * 10 + 15]; acadoVariables.x[index * 10 + 6] = acadoVariables.x[index * 10 + 16]; acadoVariables.x[index * 10 + 7] = acadoVariables.x[index * 10 + 17]; acadoVariables.x[index * 10 + 8] = acadoVariables.x[index * 10 + 18]; acadoVariables.x[index * 10 + 9] = acadoVariables.x[index * 10 + 19]; } if (strategy == 1 && xEnd != 0) { acadoVariables.x[200] 
= xEnd[0]; acadoVariables.x[201] = xEnd[1]; acadoVariables.x[202] = xEnd[2]; acadoVariables.x[203] = xEnd[3]; acadoVariables.x[204] = xEnd[4]; acadoVariables.x[205] = xEnd[5]; acadoVariables.x[206] = xEnd[6]; acadoVariables.x[207] = xEnd[7]; acadoVariables.x[208] = xEnd[8]; acadoVariables.x[209] = xEnd[9]; } else if (strategy == 2) { state[0] = acadoVariables.x[200]; state[1] = acadoVariables.x[201]; state[2] = acadoVariables.x[202]; state[3] = acadoVariables.x[203]; state[4] = acadoVariables.x[204]; state[5] = acadoVariables.x[205]; state[6] = acadoVariables.x[206]; state[7] = acadoVariables.x[207]; state[8] = acadoVariables.x[208]; state[9] = acadoVariables.x[209]; if (uEnd != 0) { state[150] = uEnd[0]; state[151] = uEnd[1]; state[152] = uEnd[2]; state[153] = uEnd[3]; } else { state[150] = acadoVariables.u[76]; state[151] = acadoVariables.u[77]; state[152] = acadoVariables.u[78]; state[153] = acadoVariables.u[79]; } state[154] = acadoVariables.od[200]; state[155] = acadoVariables.od[201]; state[156] = acadoVariables.od[202]; state[157] = acadoVariables.od[203]; state[158] = acadoVariables.od[204]; state[159] = acadoVariables.od[205]; state[160] = acadoVariables.od[206]; state[161] = acadoVariables.od[207]; state[162] = acadoVariables.od[208]; state[163] = acadoVariables.od[209]; acado_integrate(state, 1); acadoVariables.x[200] = state[0]; acadoVariables.x[201] = state[1]; acadoVariables.x[202] = state[2]; acadoVariables.x[203] = state[3]; acadoVariables.x[204] = state[4]; acadoVariables.x[205] = state[5]; acadoVariables.x[206] = state[6]; acadoVariables.x[207] = state[7]; acadoVariables.x[208] = state[8]; acadoVariables.x[209] = state[9]; } } void acado_shiftControls( real_t* const uEnd ) { int index; for (index = 0; index < 19; ++index) { acadoVariables.u[index * 4] = acadoVariables.u[index * 4 + 4]; acadoVariables.u[index * 4 + 1] = acadoVariables.u[index * 4 + 5]; acadoVariables.u[index * 4 + 2] = acadoVariables.u[index * 4 + 6]; acadoVariables.u[index * 4 + 
3] = acadoVariables.u[index * 4 + 7]; } if (uEnd != 0) { acadoVariables.u[76] = uEnd[0]; acadoVariables.u[77] = uEnd[1]; acadoVariables.u[78] = uEnd[2]; acadoVariables.u[79] = uEnd[3]; } } real_t acado_getKKT( ) { real_t kkt; int index; real_t prd; kkt = + acadoWorkspace.g[0]*acadoWorkspace.x[0] + acadoWorkspace.g[1]*acadoWorkspace.x[1] + acadoWorkspace.g[2]*acadoWorkspace.x[2] + acadoWorkspace.g[3]*acadoWorkspace.x[3] + acadoWorkspace.g[4]*acadoWorkspace.x[4] + acadoWorkspace.g[5]*acadoWorkspace.x[5] + acadoWorkspace.g[6]*acadoWorkspace.x[6] + acadoWorkspace.g[7]*acadoWorkspace.x[7] + acadoWorkspace.g[8]*acadoWorkspace.x[8] + acadoWorkspace.g[9]*acadoWorkspace.x[9] + acadoWorkspace.g[10]*acadoWorkspace.x[10] + acadoWorkspace.g[11]*acadoWorkspace.x[11] + acadoWorkspace.g[12]*acadoWorkspace.x[12] + acadoWorkspace.g[13]*acadoWorkspace.x[13] + acadoWorkspace.g[14]*acadoWorkspace.x[14] + acadoWorkspace.g[15]*acadoWorkspace.x[15] + acadoWorkspace.g[16]*acadoWorkspace.x[16] + acadoWorkspace.g[17]*acadoWorkspace.x[17] + acadoWorkspace.g[18]*acadoWorkspace.x[18] + acadoWorkspace.g[19]*acadoWorkspace.x[19] + acadoWorkspace.g[20]*acadoWorkspace.x[20] + acadoWorkspace.g[21]*acadoWorkspace.x[21] + acadoWorkspace.g[22]*acadoWorkspace.x[22] + acadoWorkspace.g[23]*acadoWorkspace.x[23] + acadoWorkspace.g[24]*acadoWorkspace.x[24] + acadoWorkspace.g[25]*acadoWorkspace.x[25] + acadoWorkspace.g[26]*acadoWorkspace.x[26] + acadoWorkspace.g[27]*acadoWorkspace.x[27] + acadoWorkspace.g[28]*acadoWorkspace.x[28] + acadoWorkspace.g[29]*acadoWorkspace.x[29] + acadoWorkspace.g[30]*acadoWorkspace.x[30] + acadoWorkspace.g[31]*acadoWorkspace.x[31] + acadoWorkspace.g[32]*acadoWorkspace.x[32] + acadoWorkspace.g[33]*acadoWorkspace.x[33] + acadoWorkspace.g[34]*acadoWorkspace.x[34] + acadoWorkspace.g[35]*acadoWorkspace.x[35] + acadoWorkspace.g[36]*acadoWorkspace.x[36] + acadoWorkspace.g[37]*acadoWorkspace.x[37] + acadoWorkspace.g[38]*acadoWorkspace.x[38] + acadoWorkspace.g[39]*acadoWorkspace.x[39] + 
acadoWorkspace.g[40]*acadoWorkspace.x[40] + acadoWorkspace.g[41]*acadoWorkspace.x[41] + acadoWorkspace.g[42]*acadoWorkspace.x[42] + acadoWorkspace.g[43]*acadoWorkspace.x[43] + acadoWorkspace.g[44]*acadoWorkspace.x[44] + acadoWorkspace.g[45]*acadoWorkspace.x[45] + acadoWorkspace.g[46]*acadoWorkspace.x[46] + acadoWorkspace.g[47]*acadoWorkspace.x[47] + acadoWorkspace.g[48]*acadoWorkspace.x[48] + acadoWorkspace.g[49]*acadoWorkspace.x[49] + acadoWorkspace.g[50]*acadoWorkspace.x[50] + acadoWorkspace.g[51]*acadoWorkspace.x[51] + acadoWorkspace.g[52]*acadoWorkspace.x[52] + acadoWorkspace.g[53]*acadoWorkspace.x[53] + acadoWorkspace.g[54]*acadoWorkspace.x[54] + acadoWorkspace.g[55]*acadoWorkspace.x[55] + acadoWorkspace.g[56]*acadoWorkspace.x[56] + acadoWorkspace.g[57]*acadoWorkspace.x[57] + acadoWorkspace.g[58]*acadoWorkspace.x[58] + acadoWorkspace.g[59]*acadoWorkspace.x[59] + acadoWorkspace.g[60]*acadoWorkspace.x[60] + acadoWorkspace.g[61]*acadoWorkspace.x[61] + acadoWorkspace.g[62]*acadoWorkspace.x[62] + acadoWorkspace.g[63]*acadoWorkspace.x[63] + acadoWorkspace.g[64]*acadoWorkspace.x[64] + acadoWorkspace.g[65]*acadoWorkspace.x[65] + acadoWorkspace.g[66]*acadoWorkspace.x[66] + acadoWorkspace.g[67]*acadoWorkspace.x[67] + acadoWorkspace.g[68]*acadoWorkspace.x[68] + acadoWorkspace.g[69]*acadoWorkspace.x[69] + acadoWorkspace.g[70]*acadoWorkspace.x[70] + acadoWorkspace.g[71]*acadoWorkspace.x[71] + acadoWorkspace.g[72]*acadoWorkspace.x[72] + acadoWorkspace.g[73]*acadoWorkspace.x[73] + acadoWorkspace.g[74]*acadoWorkspace.x[74] + acadoWorkspace.g[75]*acadoWorkspace.x[75] + acadoWorkspace.g[76]*acadoWorkspace.x[76] + acadoWorkspace.g[77]*acadoWorkspace.x[77] + acadoWorkspace.g[78]*acadoWorkspace.x[78] + acadoWorkspace.g[79]*acadoWorkspace.x[79]; kkt = fabs( kkt ); for (index = 0; index < 80; ++index) { prd = acadoWorkspace.y[index]; if (prd > 1e-12) kkt += fabs(acadoWorkspace.lb[index] * prd); else if (prd < -1e-12) kkt += fabs(acadoWorkspace.ub[index] * prd); } for (index = 0; 
index < 20; ++index) { prd = acadoWorkspace.y[index + 80]; if (prd > 1e-12) kkt += fabs(acadoWorkspace.lbA[index] * prd); else if (prd < -1e-12) kkt += fabs(acadoWorkspace.ubA[index] * prd); } return kkt; } real_t acado_getObjective( ) { real_t objVal; int lRun1; /** Row vector of size: 14 */ real_t tmpDy[ 14 ]; /** Row vector of size: 10 */ real_t tmpDyN[ 10 ]; for (lRun1 = 0; lRun1 < 20; ++lRun1) { acadoWorkspace.objValueIn[0] = acadoVariables.x[lRun1 * 10]; acadoWorkspace.objValueIn[1] = acadoVariables.x[lRun1 * 10 + 1]; acadoWorkspace.objValueIn[2] = acadoVariables.x[lRun1 * 10 + 2]; acadoWorkspace.objValueIn[3] = acadoVariables.x[lRun1 * 10 + 3]; acadoWorkspace.objValueIn[4] = acadoVariables.x[lRun1 * 10 + 4]; acadoWorkspace.objValueIn[5] = acadoVariables.x[lRun1 * 10 + 5]; acadoWorkspace.objValueIn[6] = acadoVariables.x[lRun1 * 10 + 6]; acadoWorkspace.objValueIn[7] = acadoVariables.x[lRun1 * 10 + 7]; acadoWorkspace.objValueIn[8] = acadoVariables.x[lRun1 * 10 + 8]; acadoWorkspace.objValueIn[9] = acadoVariables.x[lRun1 * 10 + 9]; acadoWorkspace.objValueIn[10] = acadoVariables.u[lRun1 * 4]; acadoWorkspace.objValueIn[11] = acadoVariables.u[lRun1 * 4 + 1]; acadoWorkspace.objValueIn[12] = acadoVariables.u[lRun1 * 4 + 2]; acadoWorkspace.objValueIn[13] = acadoVariables.u[lRun1 * 4 + 3]; acadoWorkspace.objValueIn[14] = acadoVariables.od[lRun1 * 10]; acadoWorkspace.objValueIn[15] = acadoVariables.od[lRun1 * 10 + 1]; acadoWorkspace.objValueIn[16] = acadoVariables.od[lRun1 * 10 + 2]; acadoWorkspace.objValueIn[17] = acadoVariables.od[lRun1 * 10 + 3]; acadoWorkspace.objValueIn[18] = acadoVariables.od[lRun1 * 10 + 4]; acadoWorkspace.objValueIn[19] = acadoVariables.od[lRun1 * 10 + 5]; acadoWorkspace.objValueIn[20] = acadoVariables.od[lRun1 * 10 + 6]; acadoWorkspace.objValueIn[21] = acadoVariables.od[lRun1 * 10 + 7]; acadoWorkspace.objValueIn[22] = acadoVariables.od[lRun1 * 10 + 8]; acadoWorkspace.objValueIn[23] = acadoVariables.od[lRun1 * 10 + 9]; acado_evaluateLSQ( 
acadoWorkspace.objValueIn, acadoWorkspace.objValueOut ); acadoWorkspace.Dy[lRun1 * 14] = acadoWorkspace.objValueOut[0] - acadoVariables.y[lRun1 * 14]; acadoWorkspace.Dy[lRun1 * 14 + 1] = acadoWorkspace.objValueOut[1] - acadoVariables.y[lRun1 * 14 + 1]; acadoWorkspace.Dy[lRun1 * 14 + 2] = acadoWorkspace.objValueOut[2] - acadoVariables.y[lRun1 * 14 + 2]; acadoWorkspace.Dy[lRun1 * 14 + 3] = acadoWorkspace.objValueOut[3] - acadoVariables.y[lRun1 * 14 + 3]; acadoWorkspace.Dy[lRun1 * 14 + 4] = acadoWorkspace.objValueOut[4] - acadoVariables.y[lRun1 * 14 + 4]; acadoWorkspace.Dy[lRun1 * 14 + 5] = acadoWorkspace.objValueOut[5] - acadoVariables.y[lRun1 * 14 + 5]; acadoWorkspace.Dy[lRun1 * 14 + 6] = acadoWorkspace.objValueOut[6] - acadoVariables.y[lRun1 * 14 + 6]; acadoWorkspace.Dy[lRun1 * 14 + 7] = acadoWorkspace.objValueOut[7] - acadoVariables.y[lRun1 * 14 + 7]; acadoWorkspace.Dy[lRun1 * 14 + 8] = acadoWorkspace.objValueOut[8] - acadoVariables.y[lRun1 * 14 + 8]; acadoWorkspace.Dy[lRun1 * 14 + 9] = acadoWorkspace.objValueOut[9] - acadoVariables.y[lRun1 * 14 + 9]; acadoWorkspace.Dy[lRun1 * 14 + 10] = acadoWorkspace.objValueOut[10] - acadoVariables.y[lRun1 * 14 + 10]; acadoWorkspace.Dy[lRun1 * 14 + 11] = acadoWorkspace.objValueOut[11] - acadoVariables.y[lRun1 * 14 + 11]; acadoWorkspace.Dy[lRun1 * 14 + 12] = acadoWorkspace.objValueOut[12] - acadoVariables.y[lRun1 * 14 + 12]; acadoWorkspace.Dy[lRun1 * 14 + 13] = acadoWorkspace.objValueOut[13] - acadoVariables.y[lRun1 * 14 + 13]; } acadoWorkspace.objValueIn[0] = acadoVariables.x[200]; acadoWorkspace.objValueIn[1] = acadoVariables.x[201]; acadoWorkspace.objValueIn[2] = acadoVariables.x[202]; acadoWorkspace.objValueIn[3] = acadoVariables.x[203]; acadoWorkspace.objValueIn[4] = acadoVariables.x[204]; acadoWorkspace.objValueIn[5] = acadoVariables.x[205]; acadoWorkspace.objValueIn[6] = acadoVariables.x[206]; acadoWorkspace.objValueIn[7] = acadoVariables.x[207]; acadoWorkspace.objValueIn[8] = acadoVariables.x[208]; 
acadoWorkspace.objValueIn[9] = acadoVariables.x[209]; acadoWorkspace.objValueIn[10] = acadoVariables.od[200]; acadoWorkspace.objValueIn[11] = acadoVariables.od[201]; acadoWorkspace.objValueIn[12] = acadoVariables.od[202]; acadoWorkspace.objValueIn[13] = acadoVariables.od[203]; acadoWorkspace.objValueIn[14] = acadoVariables.od[204]; acadoWorkspace.objValueIn[15] = acadoVariables.od[205]; acadoWorkspace.objValueIn[16] = acadoVariables.od[206]; acadoWorkspace.objValueIn[17] = acadoVariables.od[207]; acadoWorkspace.objValueIn[18] = acadoVariables.od[208]; acadoWorkspace.objValueIn[19] = acadoVariables.od[209]; acado_evaluateLSQEndTerm( acadoWorkspace.objValueIn, acadoWorkspace.objValueOut ); acadoWorkspace.DyN[0] = acadoWorkspace.objValueOut[0] - acadoVariables.yN[0]; acadoWorkspace.DyN[1] = acadoWorkspace.objValueOut[1] - acadoVariables.yN[1]; acadoWorkspace.DyN[2] = acadoWorkspace.objValueOut[2] - acadoVariables.yN[2]; acadoWorkspace.DyN[3] = acadoWorkspace.objValueOut[3] - acadoVariables.yN[3]; acadoWorkspace.DyN[4] = acadoWorkspace.objValueOut[4] - acadoVariables.yN[4]; acadoWorkspace.DyN[5] = acadoWorkspace.objValueOut[5] - acadoVariables.yN[5]; acadoWorkspace.DyN[6] = acadoWorkspace.objValueOut[6] - acadoVariables.yN[6]; acadoWorkspace.DyN[7] = acadoWorkspace.objValueOut[7] - acadoVariables.yN[7]; acadoWorkspace.DyN[8] = acadoWorkspace.objValueOut[8] - acadoVariables.yN[8]; acadoWorkspace.DyN[9] = acadoWorkspace.objValueOut[9] - acadoVariables.yN[9]; objVal = 0.0000000000000000e+00; for (lRun1 = 0; lRun1 < 20; ++lRun1) { tmpDy[0] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 14] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 28] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 42] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 56] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 70] + 
acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 84] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 98] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 112] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 126] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 140] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 154] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 168] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 182]; tmpDy[1] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 1] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 15] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 29] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 43] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 57] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 71] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 85] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 99] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 113] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 127] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 141] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 155] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 169] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 183]; tmpDy[2] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 2] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 16] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 30] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 44] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 58] + acadoWorkspace.Dy[lRun1 * 14 + 
5]*acadoVariables.W[lRun1 * 196 + 72] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 86] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 100] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 114] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 128] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 142] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 156] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 170] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 184]; tmpDy[3] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 3] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 17] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 31] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 45] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 59] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 73] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 87] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 101] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 115] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 129] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 143] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 157] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 171] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 185]; tmpDy[4] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 4] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 18] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 32] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 46] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 60] + 
acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 74] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 88] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 102] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 116] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 130] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 144] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 158] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 172] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 186]; tmpDy[5] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 5] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 19] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 33] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 47] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 61] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 75] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 89] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 103] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 117] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 131] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 145] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 159] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 173] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 187]; tmpDy[6] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 6] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 20] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 34] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 48] + acadoWorkspace.Dy[lRun1 * 14 + 
4]*acadoVariables.W[lRun1 * 196 + 62] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 76] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 90] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 104] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 118] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 132] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 146] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 160] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 174] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 188]; tmpDy[7] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 7] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 21] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 35] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 49] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 63] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 77] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 91] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 105] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 119] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 133] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 147] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 161] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 175] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 189]; tmpDy[8] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 8] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 22] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 36] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 50] + 
acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 64] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 78] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 92] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 106] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 120] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 134] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 148] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 162] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 176] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 190]; tmpDy[9] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 9] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 23] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 37] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 51] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 65] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 79] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 93] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 107] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 121] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 135] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 149] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 163] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 177] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 191]; tmpDy[10] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 10] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 24] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 38] + acadoWorkspace.Dy[lRun1 * 14 + 
3]*acadoVariables.W[lRun1 * 196 + 52] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 66] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 80] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 94] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 108] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 122] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 136] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 150] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 164] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 178] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 192]; tmpDy[11] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 11] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 25] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 39] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 53] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 67] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 81] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 95] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 109] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 123] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 137] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 151] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 165] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 179] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 193]; tmpDy[12] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 12] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 26] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 40] + 
acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 54] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 68] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 82] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 96] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 110] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 124] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 138] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 152] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 166] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 180] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 194]; tmpDy[13] = + acadoWorkspace.Dy[lRun1 * 14]*acadoVariables.W[lRun1 * 196 + 13] + acadoWorkspace.Dy[lRun1 * 14 + 1]*acadoVariables.W[lRun1 * 196 + 27] + acadoWorkspace.Dy[lRun1 * 14 + 2]*acadoVariables.W[lRun1 * 196 + 41] + acadoWorkspace.Dy[lRun1 * 14 + 3]*acadoVariables.W[lRun1 * 196 + 55] + acadoWorkspace.Dy[lRun1 * 14 + 4]*acadoVariables.W[lRun1 * 196 + 69] + acadoWorkspace.Dy[lRun1 * 14 + 5]*acadoVariables.W[lRun1 * 196 + 83] + acadoWorkspace.Dy[lRun1 * 14 + 6]*acadoVariables.W[lRun1 * 196 + 97] + acadoWorkspace.Dy[lRun1 * 14 + 7]*acadoVariables.W[lRun1 * 196 + 111] + acadoWorkspace.Dy[lRun1 * 14 + 8]*acadoVariables.W[lRun1 * 196 + 125] + acadoWorkspace.Dy[lRun1 * 14 + 9]*acadoVariables.W[lRun1 * 196 + 139] + acadoWorkspace.Dy[lRun1 * 14 + 10]*acadoVariables.W[lRun1 * 196 + 153] + acadoWorkspace.Dy[lRun1 * 14 + 11]*acadoVariables.W[lRun1 * 196 + 167] + acadoWorkspace.Dy[lRun1 * 14 + 12]*acadoVariables.W[lRun1 * 196 + 181] + acadoWorkspace.Dy[lRun1 * 14 + 13]*acadoVariables.W[lRun1 * 196 + 195]; objVal += + acadoWorkspace.Dy[lRun1 * 14]*tmpDy[0] + acadoWorkspace.Dy[lRun1 * 14 + 1]*tmpDy[1] + acadoWorkspace.Dy[lRun1 * 14 + 2]*tmpDy[2] + acadoWorkspace.Dy[lRun1 * 14 + 3]*tmpDy[3] + 
acadoWorkspace.Dy[lRun1 * 14 + 4]*tmpDy[4] + acadoWorkspace.Dy[lRun1 * 14 + 5]*tmpDy[5] + acadoWorkspace.Dy[lRun1 * 14 + 6]*tmpDy[6] + acadoWorkspace.Dy[lRun1 * 14 + 7]*tmpDy[7] + acadoWorkspace.Dy[lRun1 * 14 + 8]*tmpDy[8] + acadoWorkspace.Dy[lRun1 * 14 + 9]*tmpDy[9] + acadoWorkspace.Dy[lRun1 * 14 + 10]*tmpDy[10] + acadoWorkspace.Dy[lRun1 * 14 + 11]*tmpDy[11] + acadoWorkspace.Dy[lRun1 * 14 + 12]*tmpDy[12] + acadoWorkspace.Dy[lRun1 * 14 + 13]*tmpDy[13]; } tmpDyN[0] = + acadoWorkspace.DyN[0]*acadoVariables.WN[0]; tmpDyN[1] = + acadoWorkspace.DyN[1]*acadoVariables.WN[11]; tmpDyN[2] = + acadoWorkspace.DyN[2]*acadoVariables.WN[22]; tmpDyN[3] = + acadoWorkspace.DyN[3]*acadoVariables.WN[33]; tmpDyN[4] = + acadoWorkspace.DyN[4]*acadoVariables.WN[44]; tmpDyN[5] = + acadoWorkspace.DyN[5]*acadoVariables.WN[55]; tmpDyN[6] = + acadoWorkspace.DyN[6]*acadoVariables.WN[66]; tmpDyN[7] = + acadoWorkspace.DyN[7]*acadoVariables.WN[77]; tmpDyN[8] = + acadoWorkspace.DyN[8]*acadoVariables.WN[88]; tmpDyN[9] = + acadoWorkspace.DyN[9]*acadoVariables.WN[99]; objVal += + acadoWorkspace.DyN[0]*tmpDyN[0] + acadoWorkspace.DyN[1]*tmpDyN[1] + acadoWorkspace.DyN[2]*tmpDyN[2] + acadoWorkspace.DyN[3]*tmpDyN[3] + acadoWorkspace.DyN[4]*tmpDyN[4] + acadoWorkspace.DyN[5]*tmpDyN[5] + acadoWorkspace.DyN[6]*tmpDyN[6] + acadoWorkspace.DyN[7]*tmpDyN[7] + acadoWorkspace.DyN[8]*tmpDyN[8] + acadoWorkspace.DyN[9]*tmpDyN[9]; objVal *= 0.5; return objVal; }
/* ========================= cglobals.h ========================= */
#ifndef RTGLOBALS #define RTGLOBALS #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 #define WARP_SIZE 32 #define Z_ORDER_BLOCK_SIZE 16 #define CMP_RESULTS_BLOCK_SIZE 256 #define HRT_RAY_MISS 0xFFFFFFFE #define HRT_RAY_HIT 0xFFFFFFFF #define GMAXVARS 64 #define INVALID_TEXTURE 0xFFFFFFFE #define TEX_POINT_SAM 0x10000000 #define TEX_ALPHASRC_W 0x20000000 #define TEX_CLAMP_U 0x40000000 #define TEX_CLAMP_V 0x80000000 // they are related because data are storen in one int32 variable triAlphaTest // #define ALPHA_MATERIAL_MASK 0x00FFFFFF #define ALPHA_LIGHTMESH_MASK 0xFF000000 #define ALPHA_LIGHTMESH_SHIFT 24 #define ALPHA_OPACITY_TEX_HAPPEND 0x80000000 #define ALPHA_TRANSPARENCY_HAPPEND 0x40000000 #define TEXMATRIX_ID_MASK 0x00FFFFFF // for texture slots - 'color_texMatrixId' and e.t.c #define TEXSAMPLER_TYPE_MASK 0xFF000000 // for texture slots - 'color_texMatrixId' and e.t.c #ifndef M_PI #define M_PI 3.14159265358979323846f #endif #ifndef INV_PI #define INV_PI 0.31830988618379067154f #endif #ifndef INV_TWOPI #define INV_TWOPI 0.15915494309189533577f #endif #ifndef INV_FOURPI #define INV_FOURPI 0.07957747154594766788f #endif #ifndef DEG_TO_RAD #define DEG_TO_RAD (M_PI / 180.f) #endif #define GEPSILON 5e-6f #define DEPSILON 1e-20f #define DEPSILON2 1e-30f #define PEPSILON 0.025f #define PG_SCALE 1000.0f /** * These defines are for the QMC remap table to support different mappings in run time. * for example you may decide to map (0,1) to screen (x,y) and (2,3) to DOF (x,y) or * you may decide to map (0,1) to screen (x,y) and (2,3,4) to material sampling * if no mapping presents in the table (id == -1) then pseudo random should be used. 
* */ #define QMC_VAR_SCR_X 0 #define QMC_VAR_SCR_Y 1 #define QMC_VAR_DOF_X 2 #define QMC_VAR_DOF_Y 3 #define QMC_VAR_SRC_A 4 #define QMC_VAR_MAT_L 5 #define QMC_VAR_MAT_0 6 #define QMC_VAR_MAT_1 7 #define QMC_VAR_LGT_N 8 #define QMC_VAR_LGT_0 9 #define QMC_VAR_LGT_1 10 #define QMC_VAR_LGT_2 11 /** * Note that unlike QMC, MMLT don't use remap table. * These offsets are direct offsets in the ramdom vector table (in floats) * */ #define MMLT_HEAD_TOTAL_SIZE 12 // // [0-3] : LENS; 4 in total // #define MMLT_DIM_SCR_X 0 #define MMLT_DIM_SCR_Y 1 #define MMLT_DIM_DOF_X 2 #define MMLT_DIM_DOF_Y 3 // [4-10]: LIGHT; 7 in total // #define MMLT_DIM_LGT_X 4 #define MMLT_DIM_LGT_Y 5 #define MMLT_DIM_LGT_Z 6 #define MMLT_DIM_LGT_W 7 #define MMLT_DIM_LGT_X1 8 #define MMLT_DIM_LGT_Y1 9 #define MMLT_DIM_LGT_N 10 // [11] : SPLIT; // #define MMLT_DIM_SPLIT 11 #define MMLT_FLOATS_PER_MLAYER 7 #define MMLT_FLOATS_PER_SAMPLE 3 #define MMLT_FLOATS_PER_BOUNCE (MMLT_FLOATS_PER_SAMPLE + MMLT_FLOATS_PER_MLAYER) #define MMLT_COMPRESSED_F_PERB 6 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define KMLT_HEAD_SIZE 4 #define KMLT_PER_LIGHT 4 #define KMLT_PER_MATERIAL 6 enum MEGATEX_USAGE{ MEGATEX_SHADING = 1, MEGATEX_SHADING_HDR = 2, MEGATEX_NORMAL = 3, MEGATEX_OPACITY = 4, }; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef __CUDACC__ #else #ifdef OCL_COMPILER // OpenCL #define ALIGN_S(x) __attribute__ ((aligned (x))) #define __device__ #define IDH_CALL inline #define ID_CALL inline IDH_CALL ushort2 make_ushort2(ushort x, ushort y) { ushort2 res; res.x = x; res.y = y; return res; } IDH_CALL int2 make_int2(int a, int b) { int2 res; res.x = a; res.y = b; return res; } IDH_CALL int4 make_int4(int a, int b, int c, int d) { int4 res; 
res.x = a; res.y = b; res.z = c; res.w = d; return res; } #define GLOBAL_ID_X get_global_id(0) #define GLOBAL_ID_Y get_global_id(1) #define LOCAL_ID_X get_local_id(0) #define LOCAL_ID_Y get_local_id(1) #define _PACKED __attribute__ ((packed)) #define __device__ //#define SYNCTHREADS barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE) #define SYNCTHREADS_LOCAL barrier(CLK_LOCAL_MEM_FENCE) #define SYNCTHREADS_GLOBAL barrier(CLK_GLOBAL_MEM_FENCE) static inline float maxcomp(float3 v) { return fmax(v.x, fmax(v.y, v.z)); } #define NULL 0 static inline ushort4 make_ushort4(ushort a, ushort b, ushort c, ushort d) { ushort4 res; res.x = a; res.y = b; res.z = c; res.w = d; return res; } static inline void atomic_addf(volatile __global float *source, const float operand) { union { unsigned int intVal; float floatVal; } newVal; union { unsigned int intVal; float floatVal; } prevVal; do { prevVal.floatVal = *source; newVal.floatVal = prevVal.floatVal + operand; } while (atomic_cmpxchg((volatile global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal); } IDH_CALL float dot3 (const float4 u, const float4 v) { return (u.x*v.x + u.y*v.y + u.z*v.z); } typedef struct float4x3T { float4 row[3]; } float4x3; typedef struct float4x4T { float4 row[4]; } float4x4; typedef struct float3x3T { float3 row[3]; } float3x3; IDH_CALL float2 make_float2(float a, float b) { float2 res; res.x = a; res.y = b; return res; } IDH_CALL float3 make_float3(float a, float b, float c) { float3 res; res.x = a; res.y = b; res.z = c; return res; } IDH_CALL float4 make_float4(float a, float b, float c, float d) { float4 res; res.x = a; res.y = b; res.z = c; res.w = d; return res; } IDH_CALL float2 to_float2(float4 f4) { float2 res; res.x = f4.x; res.y = f4.y; return res; } IDH_CALL float3 to_float3(float4 f4) { float3 res; res.x = f4.x; res.y = f4.y; res.z = f4.z; return res; } IDH_CALL float4 to_float4(float3 v, float w) { float4 res; res.x = v.x; res.y = v.y; res.z = v.z; res.w = w; 
return res; } static inline float3 mul4x3(float4x4 m, float3 v) { float3 res; res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z + m.row[0].w; res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z + m.row[1].w; res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z + m.row[2].w; return res; } static inline float3 mul3x3(float4x4 m, float3 v) { float3 res; res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z; res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z; res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z; return res; } static inline float2 sincos2f(float a_value) { float cosVal; float sinVal = sincos(a_value, &cosVal); return make_float2(sinVal, cosVal); } #else // Common C++ #ifdef WIN32 #define ALIGN_S(x) __declspec(align(x)) #else #define ALIGN_S(x) __attribute__ ((aligned (x))) #endif #undef M_PI #define M_PI 3.14159265358979323846f #include "../../HydraAPI/hydra_api/LiteMath.h" using namespace HydraLiteMath; #include "../../HydraAPI/hydra_api/HR_HDRImage.h" typedef HydraRender::HDRImage4f HDRImage4f; typedef unsigned int uint; typedef unsigned short ushort; typedef struct float4x3T { float4 row[3]; } float4x3; typedef struct float3x3T { float3 row[3]; } float3x3; static inline float2 sincos2f(float a_value) { return make_float2(sin(a_value), cos(a_value)); } #define IDH_CALL static inline #define ID_CALL static inline #define __global #define __constant const #define __private #define __read_only typedef int image1d_t; typedef int image1d_buffer_t; typedef int image2d_t; typedef int sampler_t; const int CLK_NORMALIZED_COORDS_TRUE = 1; const int CLK_NORMALIZED_COORDS_FALSE = 2; const int CLK_ADDRESS_CLAMP = 4; const int CLK_FILTER_NEAREST = 8; const int CLK_FILTER_LINEAR = 16; const int CLK_ADDRESS_REPEAT = 32; #define COMMON_CPLUS_PLUS_CODE 1 static inline int as_int(float x) { return reinterpret_cast<int&> (x); } static inline float as_float(int x) { return reinterpret_cast<float&>(x); } #define _PACKED typedef unsigned short 
half; static inline void vstore_half(float data, size_t offset, __global half *p) { p[offset] = 0; } static inline float sign(float a) { return (a > 0.0f) ? 1.0f : -1.0f; } static inline int2 make_int2(int a, int b) { int2 res; res.x = a; res.y = b; return res; } using std::isinf; #define ENABLE_OPACITY_TEX 1 #define SHADOW_TRACE_COLORED_SHADOWS 1 #define ENABLE_BLINN 1 #include "globals_sys.h" #endif #endif typedef __global const int4* texture2d_t; #ifndef INFINITY #define INFINITY (1e38f) #endif ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef struct MatSampleT { float3 color; float3 direction; float pdf; int flags; } MatSample; enum FLAG_BITS{HRT_COMPUTE_SHADOWS = 1, HRT_DISABLE_SHADING = 2, HRT_DIRECT_LIGHT_MODE = 4, HRT_UNIFIED_IMAGE_SAMPLING = 8, HRT_PRODUCTION_IMAGE_SAMPLING = 16, // 256 coherent rays per pixel. HRT_USE_MIS = 32, HRT_DUMMY2 = 64, // !!!!!!!! DONT USE THIS FLAG !!!! UNKNOWN BUG, PT DOES NOT CONTRIBUTE TO SCREEN. HRT_STORE_SUBPIXELS = 128, HRT_FORWARD_TRACING = 256, /// tracing from light to eye; otherwise from eye to light. HRT_DRAW_LIGHT_LT = 512, HRT_3WAY_MIS_WEIGHTS = 1024, HRT_STORE_RAY_SAMPLES = 8192, HRT_ENABLE_MMLT = 16384, HRT_ENABLE_SBPT = 32768, HRT_INDIRECT_LIGHT_MODE = 65536, HRT_STUPID_PT_MODE = 65536*8, HRT_NO_RANDOM_LIGHTS_SELECT = 65536*16, HRT_DUMMY5 = 65536*32, // HRT_DUMMY6 = 65536*64, // tracing photons to form spetial photonmap to speed-up direct light sampling HRT_DUMMY7 = 65536*128, HRT_DUMMY8 = 65536*256, HRT_ENABLE_PT_CAUSTICS = 65536*2048, HRT_USE_BOTH_PHOTON_MAPS = 65536*4096, HRT_ENABLE_QMC_ONE_SEED = 65536*8192, // !!!!!!!! DONT MOVE THIS FLAG !!!! 
See random generator implementation HRT_ENABLE_COHERENT_PT = 65536*16384, }; enum VARIABLE_NAMES { // int vars // HRT_ENABLE_DOF = 0, HRT_QMC_VARIANT = 1, HRT_FIRST_BOUNCE_STORE_CACHE = 2, HRT_ENABLE_MRAYS_COUNTERS = 3, HRT_DEBUG_OUTPUT = 4, HRT_MEASURE_RAYS_TYPE = 5, HRT_BLACK_DIFFUSE_OFFSET = 6, HRT_STORE_SHADOW_COLOR_W = 7, HRT_WHITE_DIFFUSE_OFFSET = 8, HRT_TRACE_DEPTH = 9, HRT_PHOTONS_STORE_BOUNCE = 10, HRT_PHOTONS_GARTHER_BOUNCE = 11, HRT_RAYS_APPENDBUFFER_SIZE = 12, HRT_DIFFUSE_TRACE_DEPTH = 13, HRT_DISPLAY_IC_INTERMEDIATE = 14, HRT_PT_FILTER_TYPE = 15, HRT_ENABLE_BAKE = 16, HRT_SILENT_MODE = 17, HRT_VAR_ENABLE_RR = 18, HRT_RENDER_LAYER = 19, HRT_RENDER_LAYER_DEPTH = 20, HRT_IC_ENABLED = 21, HRT_IMAP_ENABLED = 22, HRT_SPHEREMAP_TEXID0 = 23, HRT_SPHEREMAP_TEXID1 = 24, HRT_USE_GAMMA_FOR_ENV = 25, HRT_HRT_SCENE_HAVE_PORTALS = 26, HRT_SPHEREMAP_TEXMATRIXID0 = 27, HRT_SPHEREMAP_TEXMATRIXID1 = 28, HRT_ENABLE_PATH_REGENERATE = 29, HRT_ENV_PDF_TABLE_ID = 30, HRT_MLT_MAX_NUMBERS = 31, HRT_MLT_ITERS_MULT = 32, HRT_MMLT_BURN_ITERS = 33, HRT_MMLT_FIRST_BOUNCE = 34, HRT_SHADOW_MATTE_BACK = 35, HRT_MAX_SAMPLES_PER_PIXEL = 36, HRT_CONTRIB_SAMPLES = 37, HRT_BOX_MODE_ON = 38, HRT_KMLT_OR_QMC_LGT_BOUNCES = 39, HRT_KMLT_OR_QMC_MAT_BOUNCES = 40, }; enum VARIABLE_FLOAT_NAMES{ // float vars // HRT_DOF_LENS_RADIUS = 0, HRT_DOF_FOCAL_PLANE_DIST = 1, HRT_TILT_ROT_X = 2, HRT_TRACE_PROCEEDINGS_TRESHOLD = 3, HRT_TILT_ROT_Y = 4, HRT_CAUSTIC_POWER_MULT = 5, HRT_IMAGE_GAMMA = 6, HRT_TEXINPUT_GAMMA = 7, HRT_ENV_COLOR_X = 8, HRT_ENV_COLOR_Y = 9, HRT_ENV_COLOR_Z = 10, HRT_ENV_COLOR2_X = 11, HRT_ENV_COLOR2_Y = 12, HRT_ENV_COLOR2_Z = 13, HRT_CAM_FOV = 14, HRT_PATH_TRACE_ERROR = 15, HRT_ENV_CLAMPING = 16, HRT_BSDF_CLAMPING = 17, HRT_BSPHERE_CENTER_X = 18, HRT_BSPHERE_CENTER_Y = 19, HRT_BSPHERE_CENTER_Z = 20, HRT_BSPHERE_RADIUS = 21, HRT_GVOXEL_SIZE = 22, HRT_FOV_X = 23, // viewport parameters HRT_FOV_Y = 24, HRT_WIDTH_F = 25, HRT_HEIGHT_F = 26, HRT_ABLOW_OFFSET_X = 27, HRT_ABLOW_OFFSET_Y = 28, 
HRT_ABLOW_SCALE_X = 29, HRT_ABLOW_SCALE_Y = 30, HRT_MMLT_IMPLICIT_FIXED_PROB = 31, HRT_MMLT_STEP_SIZE_POWER = 32, HRT_MMLT_STEP_SIZE_COEFF = 33, HRT_MLT_SCREEN_SCALE_X = 34, HRT_MLT_SCREEN_SCALE_Y = 35, HRT_BACK_TEXINPUT_GAMMA = 36, }; enum RENDER_LAYER { LAYER_COLOR = 0, LAYER_POSITIONS = 1, LAYER_NORMALS = 2, LAYER_TEXCOORD = 3, LAYER_TEXCOLOR_AND_MATERIAL = 4, // material mask LAYER_INCOMING_PRIMARY = 5, // incoming primary LAYER_INCOMING_RADIANCE = 6, // incoming secondary LAYER_COLOR_PRIMARY_AND_REST = 7, // primary + refractions and other bounces LAYER_COLOR_THE_REST = 8, LAYER_PRIMARY = 9, LAYER_SECONDARY = 10 }; // refractions, and other bounces /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// IDH_CALL uint ZIndex(ushort x, ushort y, __constant ushort* a_mortonTable256) { return (a_mortonTable256[y >> 8] << 17) | (a_mortonTable256[x >> 8] << 16) | (a_mortonTable256[y & 0xFF] << 1 ) | (a_mortonTable256[x & 0xFF] ); } IDH_CALL ushort ExtractFromZIndex3D(uint zIndex, int stride) { uint result = 0; for (int i = 0; i < 10; i++) { int bitBask = 1 << (3 * i + stride); int bit = (bitBask & zIndex) ? 1 : 0; result |= (bit << i); } return (ushort)result; } IDH_CALL ushort ExtractFromZIndex2D(uint zIndex, int stride) { uint result = 0; for (int i = 0; i < 16; i++) { int bitBask = 1 << (2 * i + stride); int bit = (bitBask & zIndex) ? 
1 : 0; result |= (bit << i); } return (ushort)result; } IDH_CALL ushort ExtractXFromZIndex(uint zIndex) { uint result = 0; for (int i = 0; i<16; i++) result |= ((1 << (2 * i)) & zIndex) >> i; return (ushort)result; } IDH_CALL ushort ExtractYFromZIndex(uint zIndex) { uint result = 0; for (int i = 0; i<16; i++) result |= ((1 << (2 * i + 1)) & zIndex) >> i; return (ushort)(result >> 1); } IDH_CALL int blocks(int elems, int threadsPerBlock) { if (elems % threadsPerBlock == 0 && elems >= threadsPerBlock) return elems / threadsPerBlock; else return (elems / threadsPerBlock) + 1; } IDH_CALL size_t blocksST(size_t elems, int threadsPerBlock) { if (elems % threadsPerBlock == 0 && elems >= threadsPerBlock) return elems / threadsPerBlock; else return (elems / threadsPerBlock) + 1; } IDH_CALL size_t roundBlocks(size_t elems, int threadsPerBlock) { if (elems < threadsPerBlock) return (size_t)threadsPerBlock; else return blocksST(elems, threadsPerBlock) * threadsPerBlock; } IDH_CALL uint Index2D(uint x, uint y, int pitch) { return y*pitch + x; } IDH_CALL uint IndexZBlock2D(int x, int y, int pitch, __constant ushort* a_mortonTable) // window_size[0] { uint zOrderX = x % Z_ORDER_BLOCK_SIZE; uint zOrderY = y % Z_ORDER_BLOCK_SIZE; uint zIndex = ZIndex(zOrderX, zOrderY, a_mortonTable); uint wBlocks = pitch / Z_ORDER_BLOCK_SIZE; uint blockX = x / Z_ORDER_BLOCK_SIZE; uint blockY = y / Z_ORDER_BLOCK_SIZE; return (blockX + (blockY)*(wBlocks))*Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE + zIndex; } IDH_CALL ushort2 GetXYFromZBlockIndex(uint a_offset, int w, int h) { int blocksSizeX = w / Z_ORDER_BLOCK_SIZE; //int blocksSizeY = h / Z_ORDER_BLOCK_SIZE; int blockId = a_offset / (Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE); int zIdInBlock = a_offset % (Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE); int blockY = blockId / blocksSizeX; int blockX = blockId - blockY*blocksSizeX; int localX = (int)ExtractXFromZIndex(zIdInBlock); int localY = (int)ExtractYFromZIndex(zIdInBlock); ushort2 res; res.x = 
(ushort)(blockX*Z_ORDER_BLOCK_SIZE + localX); res.y = (ushort)(blockY*Z_ORDER_BLOCK_SIZE + localY); return res; } IDH_CALL uint SpreadBits(int x, int offset) { x = (x | (x << 10)) & 0x000F801F; x = (x | (x << 4)) & 0x00E181C3; x = (x | (x << 2)) & 0x03248649; x = (x | (x << 2)) & 0x09249249; return (uint)(x) << offset; } IDH_CALL uint GetMortonNumber(int x, int y, int z) { return SpreadBits(x, 0) | SpreadBits(y, 1) | SpreadBits(z, 2); } IDH_CALL float3 reflect(float3 dir, float3 normal) { return normalize((normal * dot(dir, normal) * (-2.0f)) + dir); } /////////////////////////////////////////////////////////////////////////////////////////////////////////// ///// a simple tone mapping IDH_CALL float3 ToneMapping(float3 color) { return make_float3(fmin(color.x, 1.0f), fmin(color.y, 1.0f), fmin(color.z, 1.0f)); } IDH_CALL float4 ToneMapping4(float4 color) { return make_float4(fmin(color.x, 1.0f), fmin(color.y, 1.0f), fmin(color.z, 1.0f), fmin(color.w, 1.0f)); } ///////////////////////////////////////////////////////////////////////////////////////////////////////// //// IDH_CALL uint RealColorToUint32_f3(float3 real_color) { float r = real_color.x*255.0f; float g = real_color.y*255.0f; float b = real_color.z*255.0f; unsigned char red = (unsigned char)r, green = (unsigned char)g, blue = (unsigned char)b; return red | (green << 8) | (blue << 16) | 0xFF000000; } IDH_CALL uint RealColorToUint32(float4 real_color) { float r = real_color.x*255.0f; float g = real_color.y*255.0f; float b = real_color.z*255.0f; float a = real_color.w*255.0f; unsigned char red = (unsigned char)r; unsigned char green = (unsigned char)g; unsigned char blue = (unsigned char)b; unsigned char alpha = (unsigned char)a; return red | (green << 8) | (blue << 16) | (alpha << 24); } static inline float3 SafeInverse(float3 d) { const float ooeps = 1.0e-36f; // Avoid div by zero. float3 res; res.x = 1.0f / (fabs(d.x) > ooeps ? d.x : copysign(ooeps, d.x)); res.y = 1.0f / (fabs(d.y) > ooeps ? 
d.y : copysign(ooeps, d.y)); res.z = 1.0f / (fabs(d.z) > ooeps ? d.z : copysign(ooeps, d.z)); return res; } static inline float epsilonOfPos(float3 hitPos) { return fmax(fmax(fabs(hitPos.x), fmax(fabs(hitPos.y), fabs(hitPos.z))), 2.0f*GEPSILON)*GEPSILON; } static inline float misHeuristicPower1(float p) { return isfinite(p) ? fabs(p) : 0.0f; } static inline float misHeuristicPower2(float p) { return isfinite(p*p) ? p*p : 0.0f; } static inline float misWeightHeuristic(float a, float b) { const float w = misHeuristicPower1(a) / fmax(misHeuristicPower1(a) + misHeuristicPower1(b), DEPSILON2); return isfinite(w) ? w : 0.0f; } static inline float misWeightHeuristic3(float a, float b, float c) { const float w = fabs(a) / fmax(misHeuristicPower1(a) + misHeuristicPower1(b) + misHeuristicPower1(c), DEPSILON2); if(!isfinite(a)) return 1.0f; else return isfinite(w) ? w : 0.0f; } /** \brief offset reflected ray position by epsilon; \param a_hitPos - world space position on surface \param a_surfaceNorm - surface normal at a_hitPos \param a_sampleDir - ray direction in which we are going to trace reflected ray \return offseted ray position */ static inline float3 OffsRayPos(const float3 a_hitPos, const float3 a_surfaceNorm, const float3 a_sampleDir) { const float signOfNormal2 = dot(a_sampleDir, a_surfaceNorm) < 0.0f ? -1.0f : 1.0f; const float offsetEps = epsilonOfPos(a_hitPos); return a_hitPos + signOfNormal2*offsetEps*a_surfaceNorm; } /** \brief offset reflected ray position by epsilon; \param a_hitPos - world space position on surface \param a_surfaceNorm - surface normal at a_hitPos \param a_sampleDir - ray direction in which we are going to trace reflected ray \param a_shadowOffsAux - per poly auxilarry shadow offset. \return offseted ray position */ static inline float3 OffsShadowRayPos(const float3 a_hitPos, const float3 a_surfaceNorm, const float3 a_sampleDir, const float a_shadowOffsAux) { const float signOfNormal2 = dot(a_sampleDir, a_surfaceNorm) < 0.0f ? 
-1.0f : 1.0f; const float offsetEps = epsilonOfPos(a_hitPos); return a_hitPos + signOfNormal2*(offsetEps + a_shadowOffsAux)*a_surfaceNorm; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// static inline float4x4 make_float4x4(__global const float* a_data) { float4x4 matrix; matrix.row[0] = make_float4(a_data[0], a_data[1], a_data[2], a_data[3]); matrix.row[1] = make_float4(a_data[4], a_data[5], a_data[6], a_data[7]); matrix.row[2] = make_float4(a_data[8], a_data[9], a_data[10], a_data[11]); matrix.row[3] = make_float4(a_data[12], a_data[13], a_data[14], a_data[15]); return matrix; } IDH_CALL float4x4 make_matrix_rotationX(float a_angle) { float sinx = sin(a_angle); float cosx = cos(a_angle); float4x4 matrix; matrix.row[0] = make_float4(1.0f, 0.0f, 0.0f, 0.0f); matrix.row[1] = make_float4(0.0f, cosx, sinx, 0.0f); matrix.row[2] = make_float4(0.0f, -sinx, cosx, 0.0f); matrix.row[3] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); return matrix; } IDH_CALL float4x4 make_matrix_rotationY(float a_angle) { float siny = sin(a_angle); float cosy = cos(a_angle); float4x4 matrix; matrix.row[0] = make_float4(cosy, 0.0f, -siny, 0.0f); matrix.row[1] = make_float4(0.0f, 1.0f, 0.0f, 0.0f); matrix.row[2] = make_float4(siny, 0.0f, cosy, 0.0f); matrix.row[3] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); return matrix; } IDH_CALL 
float3 mul4x3x3(float4x3 m, float3 v) { float3 res; res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z + m.row[0].w; res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z + m.row[1].w; res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z + m.row[2].w; return res; } IDH_CALL float4 mul4x4x4(float4x4 m, float4 v) { float4 res; res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z + m.row[0].w*v.w; res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z + m.row[1].w*v.w; res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z + m.row[2].w*v.w; res.w = m.row[3].x*v.x + m.row[3].y*v.y + m.row[3].z*v.z + m.row[3].w*v.w; return res; } #ifndef COMMON_CPLUS_PLUS_CODE IDH_CALL float3 mul(float4x4 m, float3 v) { float3 res; res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z + m.row[0].w; res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z + m.row[1].w; res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z + m.row[2].w; return res; } #endif IDH_CALL float3 mul3x4(float4x3 m, float3 v) { float3 res; res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z + m.row[0].w; res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z + m.row[1].w; res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z + m.row[2].w; return res; } IDH_CALL float3x3 make_float3x3(float3 a, float3 b, float3 c) { float3x3 m; m.row[0] = a; m.row[1] = b; m.row[2] = c; return m; } IDH_CALL float3x3 make_float3x3_by_columns(float3 a, float3 b, float3 c) { float3x3 m; m.row[0].x = a.x; m.row[1].x = a.y; m.row[2].x = a.z; m.row[0].y = b.x; m.row[1].y = b.y; m.row[2].y = b.z; m.row[0].z = c.x; m.row[1].z = c.y; m.row[2].z = c.z; return m; } IDH_CALL float3 mul3x3x3(float3x3 m, const float3 v) { float3 res; res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z; res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z; res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z; return res; } IDH_CALL float3x3 mul3x3x3x3(float3x3 m1, float3x3 m2) { float3 column1 = mul3x3x3(m1, 
make_float3(m2.row[0].x, m2.row[1].x, m2.row[2].x)); float3 column2 = mul3x3x3(m1, make_float3(m2.row[0].y, m2.row[1].y, m2.row[2].y)); float3 column3 = mul3x3x3(m1, make_float3(m2.row[0].z, m2.row[1].z, m2.row[2].z)); return make_float3x3_by_columns(column1, column2, column3); } IDH_CALL float3x3 inverse(float3x3 a) { float det = a.row[0].x * (a.row[1].y * a.row[2].z - a.row[1].z * a.row[2].y) - a.row[0].y * (a.row[1].x * a.row[2].z - a.row[1].z * a.row[2].x) + a.row[0].z * (a.row[1].x * a.row[2].y - a.row[1].y * a.row[2].x); float3x3 b; b.row[0].x = (a.row[1].y * a.row[2].z - a.row[1].z * a.row[2].y); b.row[0].y = -(a.row[0].y * a.row[2].z - a.row[0].z * a.row[2].y); b.row[0].z = (a.row[0].y * a.row[1].z - a.row[0].z * a.row[1].y); b.row[1].x = -(a.row[1].x * a.row[2].z - a.row[1].z * a.row[2].x); b.row[1].y = (a.row[0].x * a.row[2].z - a.row[0].z * a.row[2].x); b.row[1].z = -(a.row[0].x * a.row[1].z - a.row[0].z * a.row[1].x); b.row[2].x = (a.row[1].x * a.row[2].y - a.row[1].y * a.row[2].x); b.row[2].y = -(a.row[0].x * a.row[2].y - a.row[0].y * a.row[2].x); b.row[2].z = (a.row[0].x * a.row[1].y - a.row[0].y * a.row[1].x); float s = 1.0f / det; b.row[0] *= s; b.row[1] *= s; b.row[2] *= s; return b; } #ifndef COMMON_CPLUS_PLUS_CODE static inline float4x4 inverse4x4(float4x4 m1) { float tmp[12]; // temp array for pairs float4x4 m; // calculate pairs for first 8 elements (cofactors) // tmp[0] = m1.row[2].z * m1.row[3].w; tmp[1] = m1.row[2].w * m1.row[3].z; tmp[2] = m1.row[2].y * m1.row[3].w; tmp[3] = m1.row[2].w * m1.row[3].y; tmp[4] = m1.row[2].y * m1.row[3].z; tmp[5] = m1.row[2].z * m1.row[3].y; tmp[6] = m1.row[2].x * m1.row[3].w; tmp[7] = m1.row[2].w * m1.row[3].x; tmp[8] = m1.row[2].x * m1.row[3].z; tmp[9] = m1.row[2].z * m1.row[3].x; tmp[10] = m1.row[2].x * m1.row[3].y; tmp[11] = m1.row[2].y * m1.row[3].x; // calculate first 8 m1.rowents (cofactors) // m.row[0].x = tmp[0] * m1.row[1].y + tmp[3] * m1.row[1].z + tmp[4] * m1.row[1].w; m.row[0].x -= tmp[1] * 
m1.row[1].y + tmp[2] * m1.row[1].z + tmp[5] * m1.row[1].w; m.row[1].x = tmp[1] * m1.row[1].x + tmp[6] * m1.row[1].z + tmp[9] * m1.row[1].w; m.row[1].x -= tmp[0] * m1.row[1].x + tmp[7] * m1.row[1].z + tmp[8] * m1.row[1].w; m.row[2].x = tmp[2] * m1.row[1].x + tmp[7] * m1.row[1].y + tmp[10] * m1.row[1].w; m.row[2].x -= tmp[3] * m1.row[1].x + tmp[6] * m1.row[1].y + tmp[11] * m1.row[1].w; m.row[3].x = tmp[5] * m1.row[1].x + tmp[8] * m1.row[1].y + tmp[11] * m1.row[1].z; m.row[3].x -= tmp[4] * m1.row[1].x + tmp[9] * m1.row[1].y + tmp[10] * m1.row[1].z; m.row[0].y = tmp[1] * m1.row[0].y + tmp[2] * m1.row[0].z + tmp[5] * m1.row[0].w; m.row[0].y -= tmp[0] * m1.row[0].y + tmp[3] * m1.row[0].z + tmp[4] * m1.row[0].w; m.row[1].y = tmp[0] * m1.row[0].x + tmp[7] * m1.row[0].z + tmp[8] * m1.row[0].w; m.row[1].y -= tmp[1] * m1.row[0].x + tmp[6] * m1.row[0].z + tmp[9] * m1.row[0].w; m.row[2].y = tmp[3] * m1.row[0].x + tmp[6] * m1.row[0].y + tmp[11] * m1.row[0].w; m.row[2].y -= tmp[2] * m1.row[0].x + tmp[7] * m1.row[0].y + tmp[10] * m1.row[0].w; m.row[3].y = tmp[4] * m1.row[0].x + tmp[9] * m1.row[0].y + tmp[10] * m1.row[0].z; m.row[3].y -= tmp[5] * m1.row[0].x + tmp[8] * m1.row[0].y + tmp[11] * m1.row[0].z; // calculate pairs for second 8 m1.rowents (cofactors) // tmp[0] = m1.row[0].z * m1.row[1].w; tmp[1] = m1.row[0].w * m1.row[1].z; tmp[2] = m1.row[0].y * m1.row[1].w; tmp[3] = m1.row[0].w * m1.row[1].y; tmp[4] = m1.row[0].y * m1.row[1].z; tmp[5] = m1.row[0].z * m1.row[1].y; tmp[6] = m1.row[0].x * m1.row[1].w; tmp[7] = m1.row[0].w * m1.row[1].x; tmp[8] = m1.row[0].x * m1.row[1].z; tmp[9] = m1.row[0].z * m1.row[1].x; tmp[10] = m1.row[0].x * m1.row[1].y; tmp[11] = m1.row[0].y * m1.row[1].x; // calculate second 8 m1 (cofactors) // m.row[0].z = tmp[0] * m1.row[3].y + tmp[3] * m1.row[3].z + tmp[4] * m1.row[3].w; m.row[0].z -= tmp[1] * m1.row[3].y + tmp[2] * m1.row[3].z + tmp[5] * m1.row[3].w; m.row[1].z = tmp[1] * m1.row[3].x + tmp[6] * m1.row[3].z + tmp[9] * m1.row[3].w; m.row[1].z -= 
tmp[0] * m1.row[3].x + tmp[7] * m1.row[3].z + tmp[8] * m1.row[3].w; m.row[2].z = tmp[2] * m1.row[3].x + tmp[7] * m1.row[3].y + tmp[10] * m1.row[3].w; m.row[2].z -= tmp[3] * m1.row[3].x + tmp[6] * m1.row[3].y + tmp[11] * m1.row[3].w; m.row[3].z = tmp[5] * m1.row[3].x + tmp[8] * m1.row[3].y + tmp[11] * m1.row[3].z; m.row[3].z -= tmp[4] * m1.row[3].x + tmp[9] * m1.row[3].y + tmp[10] * m1.row[3].z; m.row[0].w = tmp[2] * m1.row[2].z + tmp[5] * m1.row[2].w + tmp[1] * m1.row[2].y; m.row[0].w -= tmp[4] * m1.row[2].w + tmp[0] * m1.row[2].y + tmp[3] * m1.row[2].z; m.row[1].w = tmp[8] * m1.row[2].w + tmp[0] * m1.row[2].x + tmp[7] * m1.row[2].z; m.row[1].w -= tmp[6] * m1.row[2].z + tmp[9] * m1.row[2].w + tmp[1] * m1.row[2].x; m.row[2].w = tmp[6] * m1.row[2].y + tmp[11] * m1.row[2].w + tmp[3] * m1.row[2].x; m.row[2].w -= tmp[10] * m1.row[2].w + tmp[2] * m1.row[2].x + tmp[7] * m1.row[2].y; m.row[3].w = tmp[10] * m1.row[2].z + tmp[4] * m1.row[2].x + tmp[9] * m1.row[2].y; m.row[3].w -= tmp[8] * m1.row[2].y + tmp[11] * m1.row[2].z + tmp[5] * m1.row[2].x; // calculate matrix inverse // float k = 1.0f / (m1.row[0].x * m.row[0].x + m1.row[0].y * m.row[1].x + m1.row[0].z * m.row[2].x + m1.row[0].w * m.row[3].x); for (int i = 0; i<4; i++) { m.row[i].x *= k; m.row[i].y *= k; m.row[i].z *= k; m.row[i].w *= k; } return m; } // Look At matrix creation // return the inverse view matrix // IDH_CALL float4x4 lookAt(float3 eye, float3 center, float3 up) { float3 x, y, z; // basis; will make a rotation matrix z.x = eye.x - center.x; z.y = eye.y - center.y; z.z = eye.z - center.z; z = normalize(z); y.x = up.x; y.y = up.y; y.z = up.z; x = cross(y, z); // X vector = Y cross Z y = cross(z, x); // Recompute Y = Z cross X // cross product gives area of parallelogram, which is < 1.0 for // non-perpendicular unit-length vectors; so normalize x, y here x = normalize(x); y = normalize(y); float4x4 M; M.row[0].x = x.x; M.row[1].x = x.y; M.row[2].x = x.z; M.row[3].x = -x.x * eye.x - x.y * eye.y - x.z*eye.z; 
M.row[0].y = y.x; M.row[1].y = y.y; M.row[2].y = y.z; M.row[3].y = -y.x * eye.x - y.y * eye.y - y.z*eye.z; M.row[0].z = z.x; M.row[1].z = z.y; M.row[2].z = z.z; M.row[3].z = -z.x * eye.x - z.y * eye.y - z.z*eye.z; M.row[0].w = 0.0; M.row[1].w = 0.0; M.row[2].w = 0.0; M.row[3].w = 1.0; return M; } static inline float4x4 transpose(const float4x4 a_mat) { float4x4 res; res.row[0].x = a_mat.row[0].x; res.row[0].y = a_mat.row[1].x; res.row[0].z = a_mat.row[2].x; res.row[0].w = a_mat.row[3].x; res.row[1].x = a_mat.row[0].y; res.row[1].y = a_mat.row[1].y; res.row[1].z = a_mat.row[2].y; res.row[1].w = a_mat.row[3].y; res.row[2].x = a_mat.row[0].z; res.row[2].y = a_mat.row[1].z; res.row[2].z = a_mat.row[2].z; res.row[2].w = a_mat.row[3].z; res.row[3].x = a_mat.row[0].w; res.row[3].y = a_mat.row[1].w; res.row[3].z = a_mat.row[2].w; res.row[3].w = a_mat.row[3].w; return res; } #endif ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// IDH_CALL float3 EyeRayDir(float x, float y, float w, float h, float4x4 a_mViewProjInv) // g_mViewProjInv { float4 pos = make_float4( 2.0f * (x + 0.5f) / w - 1.0f, -2.0f * (y + 0.5f) / h + 1.0f, 0.0f, 1.0f ); pos = mul4x4x4(a_mViewProjInv, pos); pos /= pos.w; pos.y *= (-1.0f); return normalize(to_float3(pos)); } IDH_CALL void matrix4x4f_mult_ray3(float4x4 a_mWorldViewInv, __private 
float3* ray_pos, __private float3* ray_dir) // g_mWorldViewInv
{
  // Transform a ray in-place by transforming its origin and a second point
  // 100 units along it, then re-deriving the (normalized) direction; this
  // works for matrices that include translation.
  float3 pos  = mul(a_mWorldViewInv, (*ray_pos));
  float3 pos2 = mul(a_mWorldViewInv, ((*ray_pos) + 100.0f*(*ray_dir)));
  float3 diff = pos2 - pos;

  (*ray_pos) = pos;
  (*ray_dir) = normalize(diff);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////
////

// Multiply v by a 3x3 matrix stored row-major as 9 consecutive floats.
IDH_CALL float3 matrix3x3f_mult_float3(__global const float* M, float3 v)
{
  float3 res;
  res.x = M[0 * 3 + 0] * v.x + M[0 * 3 + 1] * v.y + M[0 * 3 + 2] * v.z;
  res.y = M[1 * 3 + 0] * v.x + M[1 * 3 + 1] * v.y + M[1 * 3 + 2] * v.z;
  res.z = M[2 * 3 + 0] * v.x + M[2 * 3 + 1] * v.y + M[2 * 3 + 2] * v.z;
  return res;
}

// Squared distance between two points (avoids the sqrt of a full distance).
IDH_CALL float DistanceSquared(float3 a, float3 b)
{
  float3 diff = b - a;
  return dot(diff, diff);
}

// Solid-angle pdf of sampling a cone with half-angle cosine 'cosThetaMax'.
// NOTE(review): divides by zero when cosThetaMax == 1 (degenerate cone);
// callers are presumably expected to pass cosThetaMax < 1 -- confirm.
IDH_CALL float UniformConePdf(float cosThetaMax)
{
  return 1.0f / (2.0f * M_PI * (1.0f - cosThetaMax));
}

// Map two uniform [0,1) numbers to a uniformly distributed unit-sphere direction.
IDH_CALL float3 UniformSampleSphere(float u1, float u2)
{
  float z = 1.0f - 2.0f * u1;
  float r = sqrt(fmax(0.0f, 1.0f - z*z)); // fmax guards tiny negative round-off
  float phi = 2.0f * M_PI * u2;
  float x = r * cos(phi);
  float y = r * sin(phi);
  return make_float3(x, y, z);
}

// Linear interpolation: a at t==0, b at t==1.
IDH_CALL float lerp2(float t, float a, float b)
{
  return (1.0f - t) * a + t * b;
}

// Uniformly sample a direction inside the cone around basis vector 'z'
// (with 'x', 'y' completing the orthonormal frame); cone half-angle has
// cosine 'costhetamax'.
IDH_CALL float3 UniformSampleCone(float u1, float u2, float costhetamax, float3 x, float3 y, float3 z)
{
  float costheta = lerp2(u1, costhetamax, 1.0f);
  float sintheta = sqrt(1.0f - costheta*costheta);
  float phi = u2 * 2.0f * M_PI;
  return cos(phi) * sintheta * x + sin(phi) * sintheta * y + costheta * z;
}

// Ray/sphere intersection via the quadratic formula.
// Returns (near t, far t); on a miss both components are the sentinel -1e28f.
// Note: t values may be negative when the origin is inside/behind the sphere.
IDH_CALL float2 RaySphereIntersect(float3 rayPos, float3 rayDir, float3 sphPos, float radius)
{
  float3 k = rayPos - sphPos;
  float b = dot(k, rayDir);
  float c = dot(k, k) - radius*radius;
  float d = b * b - c; // discriminant (rayDir assumed unit length -- confirm)

  float2 res;
  if (d >= 0.0f)
  {
    float sqrtd = sqrt(d);
    float t1 = -b - sqrtd;
    float t2 = -b + sqrtd;
    res.x = fmin(t1, t2);
    res.y = fmax(t1, t2);
  }
  else
  {
    res.x = -1e28f;
    res.y = -1e28f;
  }
  return res;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct ObjectListTriangle { float4 v1; float4 v2; float4 v3; }; struct ObjectListSphere { float3 pos; float r; }; struct ObjectList { #ifndef OCL_COMPILER #ifndef __CUDACC__ ObjectList() { m_triangleCount = m_offset = dummy1 = dummy2 = 0; } inline ObjectListTriangle* GetTriangles() const { return (ObjectListTriangle*)(((char*)this) + sizeof(ObjectList)); } inline const ObjectListSphere* GetSpheres() const { return (const ObjectListSphere*)((char*)this + sizeof(ObjectList) + m_triangleCount*sizeof(ObjectListTriangle)); } #endif #endif int m_offset; int m_triangleCount; int dummy1; int dummy2; }; IDH_CALL int GetNumTriangles(struct ObjectList ol) { return ol.m_triangleCount; } IDH_CALL int GetOffset(struct ObjectList ol) { return ol.m_offset; } IDH_CALL int GetNumPrimitives(struct ObjectList ol) { return GetNumTriangles(ol); } struct ALIGN_S(16) Lite_HitT { float t; int primId; int instId; int geomId; }; typedef struct Lite_HitT Lite_Hit; IDH_CALL Lite_Hit Make_Lite_Hit(float t, int a_treeId) { int a_geomId = 0; Lite_Hit hit; hit.t = t; hit.primId = -1; hit.instId = -1; hit.geomId = (a_geomId & 0x3FFFFFFF) | ((a_treeId << 30) & 0xC0000000); return hit; } static inline bool HitNone(const 
Lite_Hit a_hit) { return (a_hit.primId == -1) || !isfinite(a_hit.t); } static inline bool HitSome(const Lite_Hit a_hit) { return (a_hit.primId != -1) && isfinite(a_hit.t); } IDH_CALL int IS_LEAF(int a_leftOffsetAndLeaf) { return a_leftOffsetAndLeaf & 0x80000000; } IDH_CALL int PACK_LEAF_AND_OFFSET(int a_leftOffset, int leaf) { return (a_leftOffset & 0x7fffffff) | (leaf & 0x80000000); } IDH_CALL int EXTRACT_OFFSET(int a_leftOffsetAndLeaf) { return a_leftOffsetAndLeaf & 0x7fffffff; } // a know about bit fields, but in CUDA they didn't work // struct BVHNodeT { #ifndef __CUDACC__ #ifndef OCL_COMPILER BVHNodeT() { m_leftOffsetAndLeaf = 0xffffffff; m_escapeIndex = 0xffffffff; m_boxMin = float3(INFINITY, INFINITY, INFINITY); m_boxMax = float3(-INFINITY, -INFINITY, -INFINITY); } inline unsigned int Leaf() const { return (m_leftOffsetAndLeaf & 0x80000000) >> 31; } inline void SetLeaf(unsigned int a_Leaf) { m_leftOffsetAndLeaf = (m_leftOffsetAndLeaf & 0x7fffffff) | ((a_Leaf) << 31); } inline void SetLeftOffset(unsigned int in_offset) { m_leftOffsetAndLeaf = (m_leftOffsetAndLeaf & 0x80000000) | (in_offset & 0x7fffffff); } inline void SetObjectListOffset(unsigned int in_offset) { if (Leaf()) SetLeftOffset(in_offset); } inline unsigned int GetLeftOffset() const { return m_leftOffsetAndLeaf & 0x7fffffff; } inline unsigned int GetRightOffset() const { return GetLeftOffset() + 1; } inline unsigned int GetObjectListOffset() const { return GetLeftOffset(); } inline void SetInstance(unsigned int a_Leaf) { m_escapeIndex = a_Leaf; } inline unsigned int Instance() const { return (m_escapeIndex == 1); } #endif #endif float3 m_boxMin; unsigned int m_leftOffsetAndLeaf; float3 m_boxMax; unsigned int m_escapeIndex; }; typedef struct BVHNodeT BVHNode; IDH_CALL bool IsValidNode(const BVHNode a_node) { return !((a_node.m_leftOffsetAndLeaf == 0xffffffff) && (a_node.m_escapeIndex == 0xffffffff)); } struct _PACKED RayFlagsT { unsigned char diffuseBounceNum; unsigned char bounceNum; unsigned short 
otherFlags;
};

typedef struct RayFlagsT RayFlags;

// Bits recording what kind of scattering event a bounce produced.
enum MATERIAL_EVENT {
  RAY_EVENT_S         = 1,  ///< Indicates Specular reflection or refraction (check for RAY_EVENT_T)
  RAY_EVENT_D         = 2,  ///< Indicates Diffuse reflection or translucent (check for RAY_EVENT_T)
  RAY_EVENT_G         = 4,  ///< Indicates GLossy reflection or refraction (check for RAY_EVENT_T)
  RAY_EVENT_T         = 8,  ///< Indicates Transparency or refraction.
  RAY_EVENT_V         = 16, ///< Indicates Volume scattering, not used for a while
  RAY_EVENT_TOUT      = 32, ///< Indicates Transparency Outside of water or glass or e.t.c. (old RAY_IS_INSIDE_TRANSPARENT_OBJECT = 128)
  RAY_EVENT_TNINGLASS = 64,
};

// Classify a material sample by the event bits set by the sampler.
static inline bool isPureSpecular(const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_S) != 0; }
static inline bool isDiffuse     (const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_D) != 0; }
static inline bool isGlossy      (const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_G) != 0; }
static inline bool isTransparent (const MatSample a_sample) { return (a_sample.flags & RAY_EVENT_T) != 0; }

// Per-ray state flags; these live in the upper 16 bits of the packed
// flags word (see packRayFlags/unpackRayFlags below).
enum {
  RAY_GRAMMAR_DIRECT_LIGHT        = 64,
  RAY_GRAMMAR_OUT_OF_SCENE        = 128,
  RAY_DUMMY_FLAG_NOT_USED         = 256,
  RAY_HIT_SURFACE_FROM_OTHER_SIDE = 2048,
  RAY_IS_DEAD                     = 4096,  // set when ray had account environment or died on the surface
  RAY_SHADE_FROM_OTHER_SIDE       = 8192,
  RAY_SHADE_FROM_SKY_LIGHT        = 16384,
  RAY_WILL_DIE_NEXT_BOUNCE        = 32768, // set when ray had account only light on next bounce and then immediately die
};

// Packed 32-bit flags word layout (derived from the masks below):
//   bits  0..7  : diffuse bounce counter
//   bits  8..15 : total bounce counter
//   bits 16..31 : "other" state flags (values from the enum above)
static inline uint unpackRayFlags(uint a_flags) { return ((a_flags & 0xFFFF0000) >> 16); }
static inline uint packRayFlags(uint a_oldData, uint a_flags) { return (a_oldData & 0x0000FFFF) | (a_flags << 16); }

static inline uint unpackBounceNum(uint a_flags)     { return ((a_flags & 0x0000FF00) >> 8); }
static inline uint unpackBounceNumDiff(uint a_flags) { return (a_flags & 0x000000FF); }

static inline uint packBounceNum (uint a_oldData, uint a_bounceNum) { return (a_oldData & 0xFFFF00FF) | (a_bounceNum << 8); }

static inline uint
packBounceNumDiff(uint a_oldData, uint a_bounceNum) { return (a_oldData & 0xFFFFFF00) | (a_bounceNum); } static inline bool rayIsActiveS(RayFlags a_flags) { return (a_flags.otherFlags & (RAY_GRAMMAR_OUT_OF_SCENE | RAY_IS_DEAD)) == 0; } static inline bool rayIsActiveU(uint a_flags) { return ( ((a_flags & 0xFFFF0000) >> 16) & (RAY_GRAMMAR_OUT_OF_SCENE | RAY_IS_DEAD)) == 0; } static inline bool isEyeRay(uint a_flags) { const uint otherFlags = unpackRayFlags(a_flags); const bool haveSomeNonSpecularReflections = (otherFlags & RAY_EVENT_D) || (otherFlags & RAY_EVENT_G); return (unpackBounceNum(a_flags) == 0) || !haveSomeNonSpecularReflections; // return (unpackBounceNum(a_flags) == 0); } /** \brief This structure is used as transit to pass MIS-weights-important-data from previouce bounce to current (or from current to next). */ typedef struct MisDataT { float matSamplePdf; ///< previous angle pdf (pdfW) that were used for sampling material. float cosThetaPrev; ///< previous angle cos; it allow to compute projected angle pdf (pdfWP = pdfW/cosThetaPrev); int prevMaterialOffset; ///< offset in material buffer to material leaf (elemental brdf) that were sampled on prev bounce; it is needed to disable caustics; int isSpecular; ///< indicate if bounce was pure specular; } MisData; static inline MisData makeInitialMisData() { MisData data; data.matSamplePdf = 1.0f; data.cosThetaPrev = 1.0f; data.prevMaterialOffset = -1; data.isSpecular = 1; return data; } static inline uint encodeNormal(float3 n) { short x = (short)(n.x*32767.0f); short y = (short)(n.y*32767.0f); ushort sign = (n.z >= 0) ? 0 : 1; int sx = ((int)(x & 0xfffe) | sign); int sy = ((int)(y & 0xfffe) << 16); return (sx | sy); } static inline float3 decodeNormal(uint a_data) { const float divInv = 1.0f / 32767.0f; short a_enc_x, a_enc_y; a_enc_x = (short)(a_data & 0x0000FFFF); a_enc_y = (short)((int)(a_data & 0xFFFF0000) >> 16); float sign = (a_enc_x & 0x0001) ? 
-1.0f : 1.0f; float x = (short)(a_enc_x & 0xfffe)*divInv; float y = (short)(a_enc_y & 0xfffe)*divInv; float z = sign*sqrt(fmax(1.0f - x*x - y*y, 0.0f)); return make_float3(x, y, z); } struct ALIGN_S(16) HitPosNormT { float pos_x; float pos_y; float pos_z; uint norm_xy; #ifdef __CUDACC__ __device__ float3 GetNormal() const { return decodeNormal(norm_xy); } __device__ void SetNormal(float3 a_norm) { norm_xy = encodeNormal(normalize(a_norm)); } #endif }; typedef struct HitPosNormT HitPosNorm; ID_CALL HitPosNorm make_HitPosNorm(float4 a_data) { HitPosNorm res; res.pos_x = a_data.x; res.pos_y = a_data.y; res.pos_z = a_data.z; res.norm_xy = (uint)(as_int(a_data.w)); return res; } IDH_CALL float3 GetPos(HitPosNorm a_data) { return make_float3(a_data.pos_x, a_data.pos_y, a_data.pos_z); } IDH_CALL void SetPos(__private HitPosNorm* a_pData, float3 a_pos) { a_pData->pos_x = a_pos.x; a_pData->pos_y = a_pos.y; a_pData->pos_z = a_pos.z; } struct ALIGN_S(8) HitTexCoordT { float tex_u; float tex_v; }; typedef struct HitTexCoordT HitTexCoord; struct ALIGN_S(8) HitMatRefT { int m_data; float accumDist; }; typedef struct HitMatRefT HitMatRef; IDH_CALL int GetMaterialId(HitMatRef a_hitMat) { return a_hitMat.m_data; } IDH_CALL void SetHitType(__private HitMatRef* a_pHitMat, int a_id) { int mask = a_id << 28; int m_data2 = a_pHitMat->m_data & 0x0FFFFFFF; a_pHitMat->m_data = m_data2 | mask; } IDH_CALL void SetMaterialId(__private HitMatRef* a_pHitMat, int a_mat_id) { int mask = a_mat_id & 0x0FFFFFFF; int m_data2 = a_pHitMat->m_data & 0xF0000000; a_pHitMat->m_data = m_data2 | mask; } struct ALIGN_S(8) Hit_Part4T { uint tangentCompressed; uint bitangentCompressed; }; typedef struct Hit_Part4T Hit_Part4; static inline void CoordinateSystem(float3 v1, __private float3* v2, __private float3* v3) { float invLen = 1.0f; if (fabs(v1.x) > fabs(v1.y)) { invLen = 1.0f / sqrt(v1.x*v1.x + v1.z*v1.z); (*v2) = make_float3(-v1.z * invLen, 0.0f, v1.x * invLen); } else { invLen = 1.0f / sqrt(v1.y*v1.y + 
v1.z*v1.z); (*v2) = make_float3(0.0f, v1.z * invLen, -v1.y * invLen); } (*v3) = cross(v1, (*v2)); } IDH_CALL float3 MapSampleToCosineDistribution(float r1, float r2, float3 direction, float3 hit_norm, float power) { if(power >= 1e6f) return direction; float sin_phi = sin(2.0f*r1*3.141592654f); float cos_phi = cos(2.0f*r1*3.141592654f); //sincos(2.0f*r1*3.141592654f, &sin_phi, &cos_phi); float cos_theta = pow(1.0f - r2, 1.0f / (power + 1.0f)); float sin_theta = sqrt(1.0f - cos_theta*cos_theta); float3 deviation; deviation.x = sin_theta*cos_phi; deviation.y = sin_theta*sin_phi; deviation.z = cos_theta; float3 ny = direction, nx, nz; CoordinateSystem(ny, &nx, &nz); { float3 temp = ny; ny = nz; nz = temp; } float3 res = nx*deviation.x + ny*deviation.y + nz*deviation.z; float invSign = dot(direction, hit_norm) > 0.0f ? 1.0f : -1.0f; if (invSign*dot(res, hit_norm) < 0.0f) // reflected ray is below surface #CHECK_THIS { res = (-1.0f)*nx*deviation.x + ny*deviation.y - nz*deviation.z; //belowSurface = true; } return res; } // Using the modified Phong reflectance model for physically based rendering // IDH_CALL float3 MapSampleToModifiedCosineDistribution(float r1, float r2, float3 direction, float3 hit_norm, float power) { if (power >= 1e6f) return direction; // float sin_phi, cos_phi; // sincosf(2 * r1*3.141592654f, &sin_phi, &cos_phi); float sin_phi = sin(2.0f*r1*3.141592654f); float cos_phi = cos(2.0f*r1*3.141592654f); float sin_theta = sqrt(1.0f - pow(r2, 2.0f / (power + 1.0f))); float3 deviation; deviation.x = sin_theta*cos_phi; deviation.y = sin_theta*sin_phi; deviation.z = sqrt(1.0f - deviation.x*deviation.x - deviation.y*deviation.y); //pow(r2, 1.0f / (power + 1.0f)); float3 ny = direction, nx, nz; CoordinateSystem(ny, &nx, &nz); { float3 temp = ny; ny = nz; nz = temp; } float3 res = nx*deviation.x + ny*deviation.y + nz*deviation.z; float invSign = dot(direction, hit_norm) >= 0.0f ? 
1.0f : -1.0f; if (invSign*dot(res, hit_norm) < 0.0f) // reflected ray is below surface #CHECK_THIS res = (-1.0f)*nx*deviation.x - ny*deviation.y + nz*deviation.z; return res; } /** \brief transform float2 sample in rect [-1,1]x[-1,1] to disc centered at (0,0) with radius == 1. \param xy - input sample in rect [-1,1]x[-1,1] \return position in disc */ static inline float2 MapSamplesToDisc(float2 xy) { float x = xy.x; float y = xy.y; float r = 0; float phi = 0; float2 res = xy; if (x>y && x>-y) { r = x; phi = 0.25f*3.141592654f*(y / x); } if (x < y && x > -y) { r = y; phi = 0.25f*3.141592654f*(2.0f - x / y); } if (x < y && x < -y) { r = -x; phi = 0.25f*3.141592654f*(4.0f + y / x); } if (x >y && x<-y) { r = -y; phi = 0.25f*3.141592654f*(6 - x / y); } //float sin_phi, cos_phi; //sincosf(phi, &sin_phi, &cos_phi); float sin_phi = sin(phi); float cos_phi = cos(phi); res.x = r*sin_phi; res.y = r*cos_phi; return res; } static inline float3 MapSamplesToCone(float cosCutoff, float2 sample, float3 direction) { float cosTheta = (1.0f - sample.x) + sample.x * cosCutoff; float sinTheta = sqrt(1.0f - cosTheta * cosTheta); //float sinPhi, cosPhi; //sincosf(2.0f * M_PI * sample.y, &sinPhi, &cosPhi); float sinPhi = sin(2.0f * M_PI * sample.y); float cosPhi = cos(2.0f * M_PI * sample.y); float3 deviation = make_float3(cosPhi * sinTheta, sinPhi * sinTheta, cosTheta); // transform to different basis // float3 ny = direction; float3 nx, nz; CoordinateSystem(ny, &nx, &nz); //swap(ny, nz); { float3 temp = ny; ny = nz; nz = temp; } return nx*deviation.x + ny*deviation.y + nz*deviation.z; } IDH_CALL float3 MapSamplesToSphere(float r1, float r2) // [-1..1] { float phi = r1*3.141592654f * 2.0f; // [0 .. 2PI] float h = r2*2.0f - 1.0f; // [-1 .. 
1] float sin_phi = sin(phi); float cos_phi = cos(phi); float x = sin_phi*sqrt(1 - h*h); float y = cos_phi*sqrt(1 - h*h); float z = h; return make_float3(x, y, z); } struct ALIGN_S(16) ZBlockT { #ifndef OCL_COMPILER #ifndef __CUDACC__ ZBlockT() { index = 0; diff = 100; counter = 0; index2 = 0; } ZBlockT(int a_index, float a_diff) { index = a_index; index2 = 0; diff = a_diff; counter = 0; } #endif inline static int GetSize() { return Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE; } inline int GetOffset() const { return index*GetSize(); } inline bool operator<(const ZBlockT& rhs) const { return (diff < rhs.diff); } #endif int index; // just block offset if global screen buffer int index2; // index in other buffer + avg trace depth int counter; // how many times this block was traced? float diff; // error in some units. stop criterion if fact }; typedef struct ZBlockT ZBlock; //IDH_CALL uint unpackAvgTraceDepth(uint a_flags) { return ((a_flags & 0xFF000000) >> 24); } //IDH_CALL uint packAvgTraceDepth(uint a_oldData, uint a_flags) { return (a_oldData & 0x00FFFFFF) | (a_flags << 24); } //IDH_CALL uint unpackIndex2(uint a_flags) { return (a_flags & 0x00FFFFFF); } //IDH_CALL uint packIndex2(uint a_oldData, uint a_flags) { return (a_oldData & 0xFF000000) | a_flags; } static bool BlockFinished(ZBlock block, int a_minRaysPerPixel, int a_maxRaysPerPixel, float* a_outDiff) // for use on the cpu side ... 
for current { int samplesPerPixel = block.counter; // was *2 due to odd and even staff if(a_outDiff!=NULL) *a_outDiff = block.diff; float acceptedBadPixels = 8.0f; // sqrt((float)(CMP_RESULTS_BLOCK_SIZE)); int minRaysPerPixel = a_minRaysPerPixel; bool summErrorOk = (block.diff <= acceptedBadPixels); bool maxErrorOk = false; return ((summErrorOk || maxErrorOk) && samplesPerPixel >= minRaysPerPixel) || (samplesPerPixel >= a_maxRaysPerPixel); } IDH_CALL uint ThreadSwizzle1D(uint pixelId, uint zBlockIndex) { uint indexInsideZBlock = pixelId%CMP_RESULTS_BLOCK_SIZE; return zBlockIndex*CMP_RESULTS_BLOCK_SIZE + indexInsideZBlock; } static inline float PdfAtoW(const float aPdfA, const float aDist, const float aCosThere) { return (aPdfA*aDist*aDist) / fmax(aCosThere, DEPSILON2); } static inline float PdfWtoA(const float aPdfW, const float aDist, const float aCosThere) { return aPdfW * fabs(aCosThere) / fmax(aDist*aDist, DEPSILON2); } struct MRaysStat { #ifndef OCL_COMPILER MRaysStat() { memset(this, 0, sizeof(MRaysStat)); } #endif int traceTimePerCent; float raysPerSec; float samplesPerSec; float reorderTimeMs; float traversalTimeMs; float samLightTimeMs; float shadowTimeMs; float shadeTimeMs; float bounceTimeMS; float evalHitMs; float nextBounceMs; float sampleTimeMS; }; IDH_CALL float probabilityAbsorbRR(uint a_flags, uint a_globalFlags) { if (a_globalFlags & HRT_ENABLE_MMLT) // metropolis don't use roultte return 0.0f; const uint diffBounceNum = unpackBounceNumDiff(a_flags); const uint otherFlags = unpackRayFlags(a_flags); float pabsorb = 0.0f; if (diffBounceNum >= 4) pabsorb = 0.50f; else if (diffBounceNum >= 3) pabsorb = 0.25f; else pabsorb = 0.0f; return pabsorb; } static inline float MonteCarloVariance(float3 avgColor, float sqrColor, int nSamples) { const float maxColor = fmax(avgColor.x, fmax(avgColor.y, avgColor.z)); const float fnSamples = ((float)(nSamples)); const float nSampleInv = 1.0f / fnSamples; return fabs(sqrColor*nSampleInv - 
(maxColor*maxColor*nSampleInv*nSampleInv)); } static inline float MonteCarloRelErr(float maxColor, float sqrColor, int nSamples) { const float fnSamples = ((float)(nSamples)); const float nSampleInv = 1.0f / fnSamples; const float variance = fabs(sqrColor*nSampleInv - (maxColor*maxColor*nSampleInv*nSampleInv)); const float stdError = sqrt(variance); return stdError / (fmax(maxColor, 0.00001f)); } static inline float MonteCarloRelErr2(float3 avgColor, float sqrColor, int nSamples) { const float maxColor = fmax(avgColor.x, fmax(avgColor.y, avgColor.z)); return MonteCarloRelErr(maxColor, sqrColor, nSamples); } static inline float colorSquareMax3(float3 calcColor) { float3 calcColorSqr; calcColorSqr.x = calcColor.x*calcColor.x; calcColorSqr.y = calcColor.y*calcColor.y; calcColorSqr.z = calcColor.z*calcColor.z; return fmax(calcColorSqr.x, fmax(calcColorSqr.y, calcColorSqr.z)); } static inline float colorSquareMax4(float4 calcColor) { float4 calcColorSqr; calcColorSqr.x = calcColor.x*calcColor.x; calcColorSqr.y = calcColor.y*calcColor.y; calcColorSqr.z = calcColor.z*calcColor.z; calcColorSqr.w = calcColor.w*calcColor.w; return fmax(calcColorSqr.x, fmax(calcColorSqr.y, calcColorSqr.z)); } // Unpolarized fresnel reflection term for dielectric materials // this formula is simplified and should be checked // static inline float fresnelCoeffSimple(float cosThetaI, float a_eta) { float g = sqrt(a_eta*a_eta - 1.0f + cosThetaI * cosThetaI); float t1 = (g - cosThetaI) / (g + cosThetaI); float t2 = (cosThetaI * (g + cosThetaI) - 1) / (cosThetaI * (g - cosThetaI) + 1.0f); return 0.5f * t1 * t1 * (1.0f + t2 * t2); } // The following functions calculate the reflected and refracted // directions in addition to the fresnel coefficients. Based on PBRT // and the paper "Derivation of Refraction Formulas" by Paul S. Heckbert. 
//
/// Classic unpolarized dielectric Fresnel: average of s- and p-polarized terms.
static inline float fresnelDielectric(float cosTheta1, float cosTheta2, float etaExt, float etaInt)
{
  float Rs = (etaExt * cosTheta1 - etaInt * cosTheta2) / (etaExt * cosTheta1 + etaInt * cosTheta2);
  float Rp = (etaInt * cosTheta1 - etaExt * cosTheta2) / (etaInt * cosTheta1 + etaExt * cosTheta2);
  return (Rs * Rs + Rp * Rp) / 2.0f;
}

/// Approximate Fresnel for conductors (eta + ik form, k passed as `roughness` here).
static inline float fresnelConductor(float cosTheta, float eta, float roughness)
{
  float tmp    = (eta*eta + roughness*roughness) * (cosTheta * cosTheta);
  float rParl2 = (tmp - (eta * (2.0f * cosTheta)) + 1.0f) / (tmp + (eta * (2.0f * cosTheta)) + 1.0f);
  float tmpF   = eta*eta + roughness*roughness;
  float rPerp2 = (tmpF - (eta * (2.0f * cosTheta)) + (cosTheta*cosTheta)) / (tmpF + (eta * (2.0f * cosTheta)) + (cosTheta*cosTheta));
  return (rParl2 + rPerp2) / 2.0f;
}

/// Dielectric reflection coefficient handling both sides of the interface
/// and total internal reflection.
static inline float fresnelReflectionCoeff(float cosTheta1, float etaExt, float etaInt)
{
  // Swap the indices of refraction if the interaction starts
  // at the inside of the object
  //
  if (cosTheta1 < 0.0f)
  {
    float temp = etaInt;
    etaInt = etaExt;
    etaExt = temp;
  }

  // Using Snell's law, calculate the sine of the angle
  // between the transmitted ray and the surface normal
  //
  float sinTheta2 = etaExt / etaInt * sqrt(fmax(0.0f, 1.0f - cosTheta1*cosTheta1));

  if (sinTheta2 > 1.0f)
    return 1.0f;  // Total internal reflection!

  // Use the sin^2+cos^2=1 identity - max() guards against
  // numerical imprecision
  //
  float cosTheta2 = sqrt(fmax(0.0f, 1.0f - sinTheta2*sinTheta2));

  // Finally compute the reflection coefficient
  //
  return fresnelDielectric(fabs(cosTheta1), cosTheta2, etaInt, etaExt);
}

/// Fresnel with an implicit air (IOR 1.0) outside medium, mental-ray style.
static inline float fresnelReflectionCoeffMentalLike(float cosTheta, float refractIOR)
{
  return fresnelReflectionCoeff(fabs(cosTheta), 1.0f, refractIOR);
}

/// Scalar path-contribution measure: clamped mean of RGB.
static inline float contribFunc(float3 color)
{
  return fmax(0.33334f*(color.x + color.y + color.z), 0.0f);
}

/// Pack two 16-bit coordinates into one int: y in the high half, x in the low.
static inline int packXY1616(int x, int y) { return (y << 16) | (x & 0x0000FFFF); }

// CPU and CUDA only code
//
#ifndef OCL_COMPILER

IDH_CALL float3 clamp3(float3 x, float a, float b)
{
  return make_float3(fmin(fmax(x.x, a), b),
                     fmin(fmax(x.y, a), b),
                     fmin(fmax(x.z, a), b));
}

// 256-entry lookup table that spreads the 8 bits of a byte into even bit
// positions; used to interleave x/y into a Morton (z-order) index.
static unsigned short MortonTable256Host[] =
{
  0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
  0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
  0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
  0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
  0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
  0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
  0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
  0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,
  0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
  0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
  0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
  0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
  0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
  0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
  0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
  0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
  0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
  0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
  0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
  0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
  0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
  0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
  0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
  0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
  0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
  0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
  0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
  0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
  0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
  0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
  0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
  0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555
};

/// Interleave x and y bits into a 32-bit Morton index via the table above.
static inline uint ZIndexHost(ushort x, ushort y)
{
  return MortonTable256Host[y >> 8]   << 17 |
         MortonTable256Host[x >> 8]   << 16 |
         MortonTable256Host[y & 0xFF] << 1  |
         MortonTable256Host[x & 0xFF];
}

/// Linear buffer index for pixel (x,y) when the image is stored as
/// Z_ORDER_BLOCK_SIZE^2 tiles, each tile laid out in Morton order.
static inline uint HostIndexZBlock2D(int x, int y, int pitch)
{
  uint zOrderX = x % Z_ORDER_BLOCK_SIZE;
  uint zOrderY = y % Z_ORDER_BLOCK_SIZE;
  uint zIndex  = ZIndexHost(zOrderX, zOrderY);
  uint wBlocks = pitch / Z_ORDER_BLOCK_SIZE;
  uint blockX  = x / Z_ORDER_BLOCK_SIZE;
  uint blockY  = y / Z_ORDER_BLOCK_SIZE;
  return (blockX + (blockY)*(wBlocks))*Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE + zIndex;
}

/// Convert a z-block-ordered image into a plain row-pitch image.
static void ImageZBlockMemToRowPitch(const float4* inData, float4* outData, int w, int h)
{
  #pragma omp parallel for
  for (int y = 0; y<h; y++)
  {
    for (int x = 0; x<w; x++)
    {
      int indexSrc = HostIndexZBlock2D(x, y, w);
      int indexDst = Index2D(x, y, w);
      outData[indexDst] = inData[indexSrc];
    }
  }
}

#endif

#ifdef __CUDACC__
#undef ushort
#undef uint
#endif

// Work-group prefix sum over local memory l_Data (size 2*_bsize); result for
// this lane lands in odata. LOCAL_ID_X / SYNCTHREADS_LOCAL are defined per-backend.
#define PREFIX_SUMM_MACRO(idata,odata,l_Data,_bsize)       \
{                                                          \
  uint pos = 2 * LOCAL_ID_X - (LOCAL_ID_X & (_bsize - 1)); \
  l_Data[pos] = 0;                                         \
  pos += _bsize;                                           \
  l_Data[pos] = idata;                                     \
                                                           \
  for (uint offset = 1; offset < _bsize; offset <<= 1)     \
  {                                                        \
    SYNCTHREADS_LOCAL;                                     \
    uint t = l_Data[pos] + l_Data[pos - offset];           \
    SYNCTHREADS_LOCAL;                                     \
    l_Data[pos] = t;                                       \
  }                                                        \
                                                           \
  odata = l_Data[pos];                                     \
}                                                          \

enum CLEAR_FLAGS{ CLEAR_MATERIALS   = 1,
                  CLEAR_GEOMETRY    = 2,
                  CLEAR_LIGHTS      = 4,
                  CLEAR_TEXTURES    = 8,
                  CLEAR_CUSTOM_DATA = 16,
                  CLEAR_ALL = CLEAR_MATERIALS | CLEAR_GEOMETRY | CLEAR_LIGHTS | CLEAR_TEXTURES | CLEAR_CUSTOM_DATA };

enum BVH_FLAGS { BVH_ENABLE_SMOOTH_OPACITY = 1};

/// Primary G-buffer layer: geometry + shading data.
typedef struct GBuffer1T
{
  float  depth;
  float3 norm;
  float4 rgba;
  int    matId;
  float  coverage;
} GBuffer1;

/// Secondary G-buffer layer: texture coords and object/instance ids.
typedef struct GBuffer2T
{
  float2 texCoord;
  int    objId;
  int    instId;
} GBuffer2;

typedef struct GBufferAll
{
  GBuffer1 data1;
  GBuffer2 data2;
} GBufferAll;

/// Reset both layers to "no hit" sentinels.
static inline void initGBufferAll(__private GBufferAll* a_pElem)
{
  a_pElem->data1.depth    = 1e+6f;
  a_pElem->data1.norm     = make_float3(0, 0, 0);
  a_pElem->data1.rgba     = make_float4(0, 0, 0, 1);
  a_pElem->data1.matId    = -1;
  a_pElem->data1.coverage = 0.0f;

  a_pElem->data2.texCoord = make_float2(0, 0);
  a_pElem->data2.objId    = -1;
  a_pElem->data2.instId   = -1;
}

#define GBUFFER_SAMPLES 16
#define PMPIX_SAMPLES 256 // Production Mode Pixel Samples

/// Compress GBuffer1 into one float4:
///   x = depth, y = packed normal, z = matId[23..0] | coverage[31..24], w = packed RGBA8.
static inline float4 packGBuffer1(GBuffer1 a_input)
{
  float4 resColor;
  unsigned int packedRGBX = RealColorToUint32(a_input.rgba);

  const float clampedCoverage  = fmin(fmax(a_input.coverage*255.0f, 0.0f), 255.0f);
  const int compressedCoverage = ((int)(clampedCoverage)) << 24;
  const int packedMIdAncCov    = (a_input.matId & 0x00FFFFFF) | (compressedCoverage & 0xFF000000);

  resColor.x = a_input.depth;
  resColor.y = as_float(encodeNormal(a_input.norm));
  resColor.z = as_float(packedMIdAncCov);
  resColor.w = as_float(packedRGBX);
  return resColor;
}

/// Inverse of packGBuffer1.
/// NOTE(review): the >> 24 on a signed int sign-extends, so coverage values
/// >= 128/255 decode as negative — confirm consumers clamp or expect this.
static inline GBuffer1 unpackGBuffer1(float4 a_input)
{
  GBuffer1 res;
  res.depth = a_input.x;
  res.norm  = decodeNormal(as_int(a_input.y));
  res.matId = as_int(a_input.z) & 0x00FFFFFF;

  const int compressedCoverage = (as_int(a_input.z) & 0xFF000000) >> 24;
  res.coverage = ((float)compressedCoverage)*(1.0f / 255.0f);

  unsigned int rgba = as_int(a_input.w);
  res.rgba.x = (rgba & 0x000000FF)*(1.0f / 255.0f);
  res.rgba.y = ((rgba & 0x0000FF00) >> 8)*(1.0f / 255.0f);
  res.rgba.z = ((rgba & 0x00FF0000) >> 16)*(1.0f / 255.0f);
  res.rgba.w = ((rgba & 0xFF000000) >> 24)*(1.0f / 255.0f);
  return res;
}

/// GBuffer2 <-> float4 (ids bit-cast into the zw lanes).
static inline float4 packGBuffer2(GBuffer2 a_input)
{
  float4 res;
  res.x = a_input.texCoord.x;
  res.y = a_input.texCoord.y;
  res.z = as_float(a_input.objId);
  res.w = as_float(a_input.instId);
  return res;
}

static inline GBuffer2 unpackGBuffer2(float4 a_input)
{
  GBuffer2 res;
  res.texCoord.x = a_input.x;
  res.texCoord.y = a_input.y;
  res.objId      = as_int(a_input.z);
  res.instId     = as_int(a_input.w);
  return res;
}

/// World-space footprint of one pixel at the given distance (large sentinel for dist <= 0).
static inline float projectedPixelSize(float dist, float FOV, float w, float h)
{
  float ppx = (FOV / w)*dist;
  float ppy = (FOV / h)*dist;
  if (dist > 0.0f)
    return 2.0f*fmax(ppx, ppy);
  else
    return 1000.0f;
}

/// 0..1 similarity of two (normal, depth) pairs; 0 when either differs too much.
static inline float surfaceSimilarity(float4 data1, float4 data2, const float MADXDIFF)
{
  const float MANXDIFF = 0.15f;

  float3 n1 = to_float3(data1);
  float3 n2 = to_float3(data2);

  float dist = length(n1 - n2);
  if (dist >= MANXDIFF)
    return 0.0f;

  float d1 = data1.w;
  float d2 = data2.w;
  if (fabs(d1 - d2) >= MADXDIFF)
    return 0.0f;

  float normalSimilar = sqrt(1.0f - (dist / MANXDIFF));
  float depthSimilar  = sqrt(1.0f - fabs(d1 - d2) / MADXDIFF);
  return normalSimilar * depthSimilar;
}

/// Aggregate per-pixel difference of two G-buffer samples (surface + ids + alpha).
static inline float gbuffDiff(GBufferAll s1, GBufferAll s2, const float a_fov, float w, float h)
{
  const float ppSize         = projectedPixelSize(s1.data1.depth, a_fov, w, h);
  const float surfaceSimilar = surfaceSimilarity(to_float4(s1.data1.norm, s1.data1.depth),
                                                 to_float4(s2.data1.norm, s2.data1.depth), ppSize*2.0f);
  const float surfaceDiff = 1.0f - surfaceSimilar;
  const float objDiff     = (s1.data2.instId == s2.data2.instId && s1.data2.objId == s2.data2.objId) ? 0.0f : 1.0f;
  const float matDiff     = (s1.data1.matId == s2.data1.matId) ? 0.0f : 1.0f;
  const float alphaDiff   = fabs(s1.data1.rgba.w - s2.data1.rgba.w);
  return surfaceDiff + objDiff + matDiff + alphaDiff;
}

/// Id-only variant of gbuffDiff (a_fov/w/h are unused here).
static inline float gbuffDiffObj(GBufferAll s1, GBufferAll s2, const float a_fov, int w, int h)
{
  const float objDiff = (s1.data2.instId == s2.data2.instId && s1.data2.objId == s2.data2.objId) ? 0.0f : 1.0f;
  const float matDiff = (s1.data1.matId == s2.data1.matId) ? 0.0f : 1.0f;
  return objDiff + matDiff;
}

/// Reverse the low log2(a_maxSize) bits of a_input (radical-inverse style).
static inline int reverseBits(int a_input, int a_maxSize)
{
  int maxBit = 0;
  while (a_maxSize >>= 1)
    ++maxBit;

  int result = 0;
  for (int i = 0; i < maxBit; i++)
  {
    const int j = maxBit - i - 1;
    const int inputMask = (0x00000001 << j);
    result |= ((a_input & inputMask) >> j) << i;
  }
  return result;
}

enum PLAIN_LIGHT_TYPES {
  PLAIN_LIGHT_TYPE_POINT_OMNI = 0,
  PLAIN_LIGHT_TYPE_POINT_SPOT = 1,
  PLAIN_LIGHT_TYPE_DIRECT     = 2,
  PLAIN_LIGHT_TYPE_SKY_DOME   = 3,
  PLAIN_LIGHT_TYPE_AREA       = 4,
  PLAIN_LIGHT_TYPE_SPHERE     = 5,
  PLAIN_LIGHT_TYPE_CYLINDER   = 6,
  PLAIN_LIGHT_TYPE_MESH       = 7,
};

enum PLAIN_LIGHT_FLAGS{
  DISABLE_SAMPLING                = 1,
  SEPARATE_SKY_LIGHT_ENVIRONMENT  = 2,
  SKY_LIGHT_USE_PEREZ_ENVIRONMENT = 4,
  AREA_LIGHT_SKY_PORTAL           = 8,
  LIGHT_HAS_IES                   = 16, ///< have spherical distribution mask around light
  LIGHT_IES_POINT_AREA            = 32, ///< apply IES honio from the center of light always.
  LIGHT_DO_NOT_SAMPLE_ME          = 64, ///< zero selection probability. never sample it.
};

enum SKY_PORTAL_COLOR_SOURCE { SKY_PORTAL_SOURCE_ENVIRONMENT = 1,
                               SKY_PORTAL_SOURCE_SKYLIGHT    = 2,
                               SKY_PORTAL_SOURCE_CUSTOM      = 3 };

/// Moller-Trumbore style intersection helper: returns (u, v, t) for the ray
/// against triangle (A, B, C); no validity/range checks are performed here.
static inline float3 triBaricentrics3(float3 ray_pos, float3 ray_dir, float3 A_pos, float3 B_pos, float3 C_pos)
{
  const float3 edge1 = B_pos - A_pos;
  const float3 edge2 = C_pos - A_pos;
  const float3 pvec  = cross(ray_dir, edge2);
  const float  det   = dot(edge1, pvec);

  const float inv_det = 1.0f / det;
  const float3 tvec   = ray_pos - A_pos;
  const float  v      = dot(tvec, pvec)*inv_det;
  const float3 qvec   = cross(tvec, edge1);
  const float  u      = dot(ray_dir, qvec)*inv_det;
  const float  t      = dot(edge2, qvec)*inv_det;
  return make_float3(u, v, t);
}

/// Everything a BRDF evaluation needs at a shading point.
typedef struct ShadeContextT
{
  float3 wp;  ///< world pos
  //float3 lp; ///< local pos
  float3 l;   ///< direction to light
  float3 v;   ///< view vector
  float3 n;   ///< smooth normal (for shading and new rays offsets)
  float3 fn;  ///< flat normal (for bump mapping and tangent space transform)
  float3 tg;  ///< tangent (for bump mapping and tangent space transform)
  float3 bn;  ///< binormal (for bump mapping and tangent space transform)
  float2 tc;  ///< tex coord (0);
  //float2 tc1; ///< tex coord (1);
  bool hfi;   ///< Hit.From.Inside. if hit surface from the inside of the object that have glass or SSS material
} ShadeContext;

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#define MAXPROCTEX 16
#define F4_PROCTEX_SIZE 12

/**
\brief this structure will store results of procedural texture kernel execution.
*/
typedef struct ProcTextureListT
{
  int    currMaxProcTex;            // number of valid entries in id_f4/fdata4
  int    id_f4 [MAXPROCTEX];        // texture ids, INVALID_TEXTURE-terminated when not full
  float3 fdata4[MAXPROCTEX];        // evaluated texture colors, parallel to id_f4
} ProcTextureList;

/// Reset the list to empty (sentinel in slot 0).
static inline void InitProcTextureList(__private ProcTextureList* a_pList)
{
  a_pList->currMaxProcTex = 0;
  a_pList->id_f4[0] = INVALID_TEXTURE;
}

/// Serialize the list to a strided global buffer: ids first (one per `size`
/// stride), then (OpenCL only) colors packed pairwise as half8.
static inline void WriteProcTextureList(__global float4* fdata, int tid, int size, __private const ProcTextureList* a_pList)
{
  __global int* idata = (__global int*)fdata;

  if(a_pList->currMaxProcTex == 0)
  {
    idata[tid] = INVALID_TEXTURE;
    return;
  }

  const int finalProcTex = (a_pList->currMaxProcTex > MAXPROCTEX) ? MAXPROCTEX : a_pList->currMaxProcTex;

  for(int i=0;i<finalProcTex;i++)
    idata[tid + size * i] = a_pList->id_f4[i];

  if(finalProcTex < MAXPROCTEX)
    idata[tid + size * finalProcTex] = INVALID_TEXTURE; // list end

#ifdef OCL_COMPILER
  for(int i=0;i<finalProcTex;i+=2)
  {
    const float4 h1 = to_float4(a_pList->fdata4[i+0], 0.0f);
    const float4 h2 = to_float4(a_pList->fdata4[i+1], 0.0f);
    float8 data = {h1.x, h1.y, h1.z, h1.w, h2.x, h2.y, h2.z, h2.w,};
    const int offset = (tid + size * (i/2 + MAXPROCTEX/4));
    vstore_half8(data, 0, (__global half*)(fdata + offset) );
  }
#endif
}

/// Inverse of WriteProcTextureList; stops at the INVALID_TEXTURE sentinel.
static inline void ReadProcTextureList(__global float4* fdata, int tid, int size, __private ProcTextureList* a_pList)
{
  if (fdata == 0)
    return;

  __global int* idata = (__global int*)fdata;

  int currMaxProcTex;
  for(currMaxProcTex = 0; currMaxProcTex < MAXPROCTEX; currMaxProcTex++)
  {
    const int texId = idata[tid + size * currMaxProcTex];
    a_pList->id_f4[currMaxProcTex] = texId;
    if(texId == INVALID_TEXTURE)
      break;
  }

#ifdef OCL_COMPILER
  for(int i=0;i<currMaxProcTex;i+=2)
  {
    const int offset = (tid + size * (i/2 + MAXPROCTEX/4));
    const float8 data = vload_half8(0, (__global half*)(fdata + offset));
    a_pList->fdata4[i+0] = to_float3(data.s0123);
    a_pList->fdata4[i+1] = to_float3(data.s4567);
  }
#endif

  a_pList->currMaxProcTex = currMaxProcTex;
}

/**
\brief get color for precomputed procedural texture
\param a_texId - input tex id
\param a_pList - input ptl
\return texture color;
*/
static inline float4 readProcTex(int a_texId, const __private ProcTextureList* a_pList)
{
  // Branch-free linear search (ternary chains instead of a loop with break,
  // which is cheaper for GPU warps); (1,1,1,-1) means "not found".
  //for(int i=0; i<maxIter; i++)
  //{
  //  if(a_texId == a_pList->id_f4[i])
  //    return to_float4(a_pList->fdata4[i], 0.0f);
  //}
  //
  //return make_float4(1, 1, 1, -1.0f);

  const int maxIter = (a_pList->currMaxProcTex < MAXPROCTEX) ? a_pList->currMaxProcTex : MAXPROCTEX; // min

  float4 quad1 = make_float4(1, 1, 1, -1.0f);
  quad1 = (0 < maxIter && a_texId == a_pList->id_f4[0]) ? to_float4(a_pList->fdata4[0], 0.0f) : quad1;
  quad1 = (1 < maxIter && a_texId == a_pList->id_f4[1]) ? to_float4(a_pList->fdata4[1], 0.0f) : quad1;
  quad1 = (2 < maxIter && a_texId == a_pList->id_f4[2]) ? to_float4(a_pList->fdata4[2], 0.0f) : quad1;
  quad1 = (3 < maxIter && a_texId == a_pList->id_f4[3]) ? to_float4(a_pList->fdata4[3], 0.0f) : quad1;

  float4 quad2 = make_float4(1, 1, 1, -1.0f);
  quad2 = (4 < maxIter && a_texId == a_pList->id_f4[4]) ? to_float4(a_pList->fdata4[4], 0.0f) : quad2;
  quad2 = (5 < maxIter && a_texId == a_pList->id_f4[5]) ? to_float4(a_pList->fdata4[5], 0.0f) : quad2;
  quad2 = (6 < maxIter && a_texId == a_pList->id_f4[6]) ? to_float4(a_pList->fdata4[6], 0.0f) : quad2;
  quad2 = (7 < maxIter && a_texId == a_pList->id_f4[7]) ? to_float4(a_pList->fdata4[7], 0.0f) : quad2;

  const float4 quad12 = (quad1.w != -1.0f) ? quad1 : quad2;

  float4 quad3 = make_float4(1, 1, 1, -1.0f);
  quad3 = (8  < maxIter && a_texId == a_pList->id_f4[8])  ? to_float4(a_pList->fdata4[8],  0.0f) : quad3;
  quad3 = (9  < maxIter && a_texId == a_pList->id_f4[9])  ? to_float4(a_pList->fdata4[9],  0.0f) : quad3;
  quad3 = (10 < maxIter && a_texId == a_pList->id_f4[10]) ? to_float4(a_pList->fdata4[10], 0.0f) : quad3;
  quad3 = (11 < maxIter && a_texId == a_pList->id_f4[11]) ? to_float4(a_pList->fdata4[11], 0.0f) : quad3;

  float4 quad4 = make_float4(1, 1, 1, -1.0f);
  quad4 = (12 < maxIter && a_texId == a_pList->id_f4[12]) ? to_float4(a_pList->fdata4[12], 0.0f) : quad4;
  quad4 = (13 < maxIter && a_texId == a_pList->id_f4[13]) ? to_float4(a_pList->fdata4[13], 0.0f) : quad4;
  quad4 = (14 < maxIter && a_texId == a_pList->id_f4[14]) ? to_float4(a_pList->fdata4[14], 0.0f) : quad4;
  quad4 = (15 < maxIter && a_texId == a_pList->id_f4[15]) ? to_float4(a_pList->fdata4[15], 0.0f) : quad4;

  const float4 quad34 = (quad3.w != -1.0f) ? quad3 : quad4;

  return (quad12.w != -1.0f) ? quad12 : quad34;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/// One light sample used for shadow-ray / next-event estimation.
typedef struct ShadowSampleT
{
  float3 pos;
  float3 color;
  float  pdf;
  float  maxDist;
  float  cosAtLight;
  bool   isPoint;
} ShadowSample;

/// Serialize a shadow sample into 3 strided float4 slots; isPoint is encoded
/// as the sign of pdf (negative == point light).
static inline void WriteShadowSample(const __private ShadowSample* a_pSam, float a_lightProbSel, int a_lightOffset,
                                     int a_tid, int a_threadNum, __global float4* a_out)
{
  const float pdfAndIsPoint = a_pSam->isPoint ? (-1.0f)*a_pSam->pdf : a_pSam->pdf;

  a_out[a_tid + a_threadNum*0] = make_float4(a_pSam->pos.x,      a_pSam->pos.y,   a_pSam->pos.z,         pdfAndIsPoint);
  a_out[a_tid + a_threadNum*1] = make_float4(a_pSam->color.x,    a_pSam->color.y, a_pSam->color.z,       a_pSam->maxDist);
  a_out[a_tid + a_threadNum*2] = make_float4(a_pSam->cosAtLight, a_lightProbSel,  as_float(a_lightOffset), 0);
}

/// Inverse of WriteShadowSample.
static inline void ReadShadowSample(const __global float4* a_in, int a_tid, int a_threadNum,
                                    __private ShadowSample* a_pSam, __private float* a_pLightProbSel, __private int* a_pLightOffset)
{
  const float4 f0 = a_in[a_tid + a_threadNum*0];
  const float4 f1 = a_in[a_tid + a_threadNum*1];
  const float4 f2 = a_in[a_tid + a_threadNum*2];

  a_pSam->pos.x   = f0.x;
  a_pSam->pos.y   = f0.y;
  a_pSam->pos.z   = f0.z;
  a_pSam->pdf     = fabs(f0.w);
  a_pSam->isPoint = (f0.w <= 0); // this is ok, if pdf is 0, it can be only point light

  a_pSam->color.x = f1.x;
  a_pSam->color.y = f1.y;
  a_pSam->color.z = f1.z;
  a_pSam->maxDist = f1.w;

  a_pSam->cosAtLight  = f2.x;
  (*a_pLightProbSel)  = f2.y;
  (*a_pLightOffset)   = as_int(f2.z);
}

/**
\brief Per ray accumulated (for all bounces) data.
*/
typedef struct ALIGN_S(16) PerRayAccT
{
  float pdfGTerm;    ///< accumulated G term equal to product of G(x1,x2,x3) for all bounces; for 3-Way we aqctually don't need it and mult it with "-1" to store first bounce specular flag from light direcction.
  float pdfLightWP;  ///< accumulated probability per projected solid angle for light path
  float pdfCameraWP; ///< accumulated probability per projected solid angle for camera path
  float pdfCamA0;    ///< equal to pdfWP[0]*G[0] (if [0] means light)
} PerRayAcc;

/// Neutral accumulator: all products start at 1.
static inline PerRayAcc InitialPerParAcc()
{
  PerRayAcc res;
  res.pdfGTerm    = 1.0f;
  res.pdfLightWP  = 1.0f;
  res.pdfCameraWP = 1.0f;
  res.pdfCamA0    = 1.0f;
  return res;
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/// Full uncompressed surface hit record (see Write/ReadSurfaceHit for the packed form).
typedef struct SurfaceHitT
{
  float3 pos;
  float3 normal;
  float3 flatNormal;
  float3 tangent;
  float3 biTangent;
  float2 texCoord;
  int    matId;
  float  t;
  float  sRayOff;
  bool   hfi;
} SurfaceHit;

// Bit fields for the packed path-vertex flags word.
#define PV_PACK_VALID_FIELD 1
#define PV_PACK_WASSP_FIELD 2
#define PV_PACK_HITFI_FIELD 4 // Hit From Inside
#define PV_PACK_RCONN_FIELD 8 // Ready For Connect (pack this if camera path don't hit light but store camera vertex instead).
// Each SurfaceHit occupies 4 float4 slots in the strided (struct-of-arrays) buffers below.
#define SURFACE_HIT_SIZE_IN_F4 4

// Pack a SurfaceHit into 4 float4 slots of a strided buffer:
//   slot0 = (pos.xyz, texCoord.x)
//   slot1 = (normal.xyz, texCoord.y)
//   slot2 = (flatNormal, tangent, biTangent -- each compressed to one int via
//            encodeNormal and bit-cast with as_float; matId bit-cast in .w)
//   slot3 = (t, sRayOff, 0, packed flags -- only the HITFI bit is stored)
// Layout must match ReadSurfaceHit/ReadSurfaceHitMatId/ReadSurfaceHitPos exactly.
static inline void WriteSurfaceHit(const __private SurfaceHit* a_pHit, int a_tid, int a_threadNum, __global float4* a_out)
{
  const float4 f1 = to_float4(a_pHit->pos,    a_pHit->texCoord.x);
  const float4 f2 = to_float4(a_pHit->normal, a_pHit->texCoord.y);
  const float4 f3 = make_float4(as_float( encodeNormal(a_pHit->flatNormal)),
                                as_float( encodeNormal(a_pHit->tangent)),
                                as_float( encodeNormal(a_pHit->biTangent)),
                                as_float( a_pHit->matId) );

  // pack boolean flags into an int stored bit-cast in f4.w (only HITFI here)
  const int bit3   = a_pHit->hfi ? PV_PACK_HITFI_FIELD : 0;
  const float4 f4  = make_float4(a_pHit->t, a_pHit->sRayOff, 0, as_float(bit3));

  a_out[a_tid + 0*a_threadNum] = f1;
  a_out[a_tid + 1*a_threadNum] = f2;
  a_out[a_tid + 2*a_threadNum] = f3;
  a_out[a_tid + 3*a_threadNum] = f4;
}

// Overwrite only slot 2 with a new material id.
// NOTE(review): this also zeroes the packed flatNormal/tangent/biTangent that
// live in slot 2 -- presumably callers only need matId afterwards; confirm.
static inline void WriteSurfaceHitMatId(const int a_matId, int a_tid, int a_threadNum, __global float4* a_out)
{
  const float4 f3 = make_float4(0, 0, 0, as_float(a_matId));
  a_out[a_tid + 2*a_threadNum] = f3;
}

// Inverse of WriteSurfaceHit: unpack 4 float4 slots back into a SurfaceHit.
static inline void ReadSurfaceHit(const __global float4* a_in, int a_tid, int a_threadNum, __private SurfaceHit* a_pHit)
{
  const float4 f1 = a_in[a_tid + 0*a_threadNum];
  const float4 f2 = a_in[a_tid + 1*a_threadNum];
  const float4 f3 = a_in[a_tid + 2*a_threadNum];
  const float4 f4 = a_in[a_tid + 3*a_threadNum];

  a_pHit->pos        = to_float3 (f1);
  a_pHit->texCoord.x = f1.w;
  a_pHit->normal     = to_float3 (f2);
  a_pHit->texCoord.y = f2.w;
  a_pHit->flatNormal = decodeNormal(as_int(f3.x));
  a_pHit->tangent    = decodeNormal(as_int(f3.y));
  a_pHit->biTangent  = decodeNormal(as_int(f3.z));
  a_pHit->matId      = as_int(f3.w);
  a_pHit->t          = f4.x;
  a_pHit->sRayOff    = f4.y;

  const int flags = as_int(f4.w);
  a_pHit->hfi = ((flags & PV_PACK_HITFI_FIELD) != 0);
}

// Cheap partial read: only the material id (slot 2, .w).
static inline int ReadSurfaceHitMatId(const __global float4* a_in, int a_tid, int a_threadNum)
{
  const float4 f3 = a_in[a_tid + 2*a_threadNum];
  return as_int(f3.w);
}

// Cheap partial read: only the hit position (slot 0, .xyz).
static inline float3 ReadSurfaceHitPos(const __global float4* a_in, int a_tid, int a_threadNum)
{
  return to_float3(a_in[a_tid + 0*a_threadNum]);
}

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Material class (BRDF/BSDF model) stored in the material header.
enum PLAIN_MAT_TYPES {
  PLAIN_MAT_CLASS_PHONG_SPECULAR = 0,
  PLAIN_MAT_CLASS_BLINN_SPECULAR = 1,  // Micro Facet Torrance Sparrow model with Blinn distribution
  PLAIN_MAT_CLASS_PERFECT_MIRROR = 2,
  PLAIN_MAT_CLASS_THIN_GLASS     = 3,
  PLAIN_MAT_CLASS_GLASS          = 4,
  PLAIN_MAT_CLASS_TRANSLUCENT    = 5,
  PLAIN_MAT_CLASS_SHADOW_MATTE   = 6,
  PLAIN_MAT_CLASS_LAMBERT        = 7,
  PLAIN_MAT_CLASS_OREN_NAYAR     = 8,
  PLAIN_MAT_CLASS_BLEND_MASK     = 9,
  PLAIN_MAT_CLASS_EMISSIVE       = 10,
  PLAIN_MAT_CLASS_VOLUME_PERLIN  = 11,
  PLAIN_MAT_CLASS_SSS            = 12
};

// Bit flags stored in the material header (see materialGetFlags below).
enum PLAIN_MAT_FLAGS{
  PLAIN_MATERIAL_IS_LIGHT            = 1,
  PLAIN_MATERIAL_CAST_CAUSTICS       = 2,
  PLAIN_MATERIAL_HAS_DIFFUSE         = 4,
  PLAIN_MATERIAL_HAS_TRANSPARENCY    = 8,
  PLAIN_MATERIAL_INVERT_NMAP_X       = 16,
  PLAIN_MATERIAL_INVERT_NMAP_Y       = 32,
  PLAIN_MATERIAL_INVERT_SWAP_NMAP_XY = 64,
  PLAIN_MATERIAL_INVERT_HEIGHT       = 128,
  PLAIN_MATERIAL_SKIP_SHADOW         = 256,
  PLAIN_MATERIAL_FORBID_EMISSIVE_GI  = 512,
  PLAIN_MATERIAL_SKIP_SKY_PORTAL     = 1024,
  PLAIN_MATERIAL_EMISSION_FALOFF     = 2048,

  // This flag marks node as a real blend of different materials.
  // It used for blending emissive properties and normal maps.
  //
  PLAIN_MATERIAL_SURFACE_BLEND        = 4096,
  PLAIN_MATERIAL_HAVE_BTDF            = 8192,
  PLAIN_MATERIAL_INVIS_LIGHT          = 16384,
  PLAIN_MATERIAL_CAN_SAMPLE_REFL_ONLY = 32768,
  PLAIN_MATERIAL_HAVE_PROC_TEXTURES   = 32768*2,
  PLAIN_MATERIAL_LOCAL_AO1            = 32768*4,
  PLAIN_MATERIAL_LOCAL_AO2            = 32768*8,
  PLAIN_MATERIAL_CAMERA_MAPPED_REFL   = 32768*16,
};

// A material is a flat float array; all fields are addressed by the offset
// #defines below and bit-cast with as_int/as_float when they hold integers.
#define PLAIN_MATERIAL_DATA_SIZE        192
#define PLAIN_MATERIAL_CUSTOM_DATA_SIZE 80
#define MIX_TREE_MAX_DEEP               7

struct PlainMaterialT
{
  float data[PLAIN_MATERIAL_DATA_SIZE];
};

typedef struct PlainMaterialT PlainMaterial;

// emissive component, always present in material to speed-up code
//
#define EMISSIVE_COLORX_OFFSET      4
#define EMISSIVE_COLORY_OFFSET      5
#define EMISSIVE_COLORZ_OFFSET      6
#define EMISSIVE_TEXID_OFFSET       7
#define EMISSIVE_TEXMATRIXID_OFFSET 8
#define EMISSIVE_LIGHTID_OFFSET     9

// shared (class-independent) slots placed after the per-class custom data
#define OPACITY_TEX_OFFSET      (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+1)
#define OPACITY_TEX_MATRIX      (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+2)
#define NORMAL_TEX_OFFSET       (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+3)
#define NORMAL_TEX_MATRIX       (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+4)
#define EMISSIVE_BLEND_OFFSET   (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+5)
#define PARALLAX_HEIGHT         (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+6)
#define EMISSIVE_SAMPLER_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+8)
#define NORMAL_SAMPLER_OFFSET   (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+20)
#define OPACITY_SAMPLER_OFFSET  (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+32)

// #define PROC_TEX1_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+44) // FREE SLOT!
// #define PROC_TEX2_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+45) // FREE SLOT!
// #define PROC_TEX3_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+46) // FREE SLOT!
// #define PROC_TEX4_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+47) // FREE SLOT!
// #define PROC_TEX5_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+48) // FREE SLOT!
#define PROC_TEX_TABLE_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+49) #define PROC_TEX_AO_TYPE (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+50) #define PROC_TEX_AO_SAMPLER (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+52) #define PROC_TEX_TEX_ID (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+64) #define PROC_TEXMATRIX_ID (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+65) #define PROC_TEX_AO_LENGTH (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+66) #define PROC_TEX_AO_TYPE2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+67) #define PROC_TEX_AO_SAMPLER2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+68) #define PROC_TEX_TEX_ID2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+80) #define PROC_TEXMATRIX_ID2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+81) #define PROC_TEX_AO_LENGTH2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+82) #define PROC_TEX1_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+83) #define PROC_TEXN_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+99) enum AO_TYPES { AO_TYPE_NONE = 0, AO_TYPE_UP = 1, AO_TYPE_DOWN = 2, AO_TYPE_BOTH = 4 }; #define PLAIN_MAT_TYPE_OFFSET 0 #define PLAIN_MAT_FLAGS_OFFSET 1 #define PLAIN_MAT_COMPONENTS_OFFSET 2 static inline int materialGetType (__global const PlainMaterial* a_pMat) { return as_int(a_pMat->data[PLAIN_MAT_TYPE_OFFSET]); } static inline int materialGetFlags (__global const PlainMaterial* a_pMat) { return as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]); } static inline bool materialCastCaustics (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_CAST_CAUSTICS) != 0; } static inline bool materialHasTransparency (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_HAS_TRANSPARENCY) != 0; } static inline bool materialIsSkyPortal (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_SKIP_SKY_PORTAL) != 0; } static inline bool materialIsInvisLight (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_INVIS_LIGHT) != 0; } 
// Copy the list of procedural texture ids from a_pData into the material head,
// padding the remaining slots with INVALID_TEXTURE (acts as a terminator).
static inline void PutProcTexturesIdListToMaterialHead(const ProcTextureList* a_pData, PlainMaterial* a_pMat)
{
  for(int i=0;i<a_pData->currMaxProcTex;i++)
    ((int*)(a_pMat->data))[PROC_TEX1_F4_HEAD_OFFSET + i] = a_pData->id_f4[i];

  for(int i=a_pData->currMaxProcTex; i<MAXPROCTEX; i++)
    ((int*)(a_pMat->data))[PROC_TEX1_F4_HEAD_OFFSET + i] = INVALID_TEXTURE;
}

// Inverse of PutProcTexturesIdListToMaterialHead: read ids until the
// INVALID_TEXTURE terminator (or MAXPROCTEX) and record the count.
static inline void GetProcTexturesIdListFromMaterialHead(__global const PlainMaterial* a_pMat, __private ProcTextureList* a_pData)
{
  int currMaxProcTex;
  for(currMaxProcTex = 0; currMaxProcTex < MAXPROCTEX; currMaxProcTex++)
  {
    const int texId = as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET + currMaxProcTex]);
    a_pData->id_f4[currMaxProcTex] = texId;   // terminator itself is also stored
    if(texId == INVALID_TEXTURE)
      break;
  }
  a_pData->currMaxProcTex = currMaxProcTex;
}

// True if a_texId occurs anywhere in the (up to 16) proc-tex head slots.
static inline bool materialHeadHaveTargetProcTex(__global const PlainMaterial* a_pMat, int a_texId)
{
  return (as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+0])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+1])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+2])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+3])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+4])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+5])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+6])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+7])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+8])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+9])  == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+10]) == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+11]) == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+12]) == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+13]) == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+14]) == a_texId ||
          as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET+15]) == a_texId);
}

// True if the first proc-tex slot is not the terminator (list is non-empty).
static inline bool MaterialHaveAtLeastOneProcTex(__global const PlainMaterial* a_pMat)
{
  return as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET]) != INVALID_TEXTURE;
}

// First AO channel enabled?
static inline bool MaterialHaveAO(__global const PlainMaterial* a_pMat)
{
  return as_int(a_pMat->data[PROC_TEX_AO_TYPE]) != AO_TYPE_NONE;
}

// Both AO channels enabled?
static inline bool MaterialHaveAO2(__global const PlainMaterial* a_pMat)
{
  return as_int(a_pMat->data[PROC_TEX_AO_TYPE])  != AO_TYPE_NONE &&
         as_int(a_pMat->data[PROC_TEX_AO_TYPE2]) != AO_TYPE_NONE;
}

#define EVAL_FLAG_DEFAULT          0
#define EVAL_FLAG_DISABLE_CAUSTICS 1
#define EVAL_FLAG_FWD_DIR          2

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/**
\brief  Select index proportional to piecewise constant function that is stored in a_accum[0 .. N-2]; Binary search version.
\param  a_r     - input random variable in range [0, 1]
\param  a_accum - input float array. it must be a result of prefix summ - i.e. it must be sorted.
\param  N       - size of extended array - i.e. a_accum[N-1] == summ(a_accum[0 .. N-2]).
\param  pPDF    - out parameter. probability of picking up found value.
\return found index
*/
static int SelectIndexPropToOpt(const float a_r, __global const float* a_accum, const int N,
                                __private float* pPDF)
{
  int leftBound  = 0;
  int rightBound = N - 2; // because a_accum[N-1] == summ(a_accum[0 .. N-2]).

  int counter    = 0;
  int currPos    = -1;

  const int maxStep = 50;       // hard cap on iterations (defensive; log2(N) would suffice)

  const float x = a_r*a_accum[N - 1];  // rescale the random number to the total sum

  while (rightBound - leftBound > 1 && counter < maxStep)
  {
    const int currSize = rightBound + leftBound;
    const int currPos1 = (currSize % 2 == 0) ? (currSize + 1) / 2 : (currSize + 0) / 2;  // midpoint

    // interval (a, b] owned by index currPos1
    const float a = a_accum[currPos1 + 0];
    const float b = a_accum[currPos1 + 1];

    if (a < x && x <= b)
    {
      currPos = currPos1;
      break;
    }
    else if (x <= a)
      rightBound = currPos1;
    else if (x > b)
      leftBound = currPos1;

    counter++;
  }

  if (currPos < 0) // check the rest intervals
  {
    const float a1 = a_accum[leftBound + 0];
    const float b1 = a_accum[leftBound + 1];

    const float a2 = a_accum[rightBound + 0];
    const float b2 = a_accum[rightBound + 1];

    if (a1 < x && x <= b1) currPos = leftBound;
    if (a2 < x && x <= b2) currPos = rightBound;
  }

  if (x == 0.0f)        // degenerate draw: pick the first interval
    currPos = 0;
  else if (currPos < 0) // fallback if no interval matched (e.g. repeated values)
    currPos = (rightBound + leftBound + 1) / 2;

  // pdf = (interval mass) / (total mass)
  (*pPDF) = (a_accum[currPos + 1] - a_accum[currPos]) / a_accum[N - 1];

  return currPos;
}

/**
\brief  search for the lower bound (left range)
\param  a          - array, sorted by .x
\param  length     - array size
\param  left_range - value to search for
\return index of the first element with a[i].x >= left_range, or -1 if none
*/
static inline int binarySearchForLeftRange(__global const int2* a, int length, int left_range)
{
  if (a[length - 1].x < left_range)
    return -1;

  int low  = 0;
  int high = length - 1;

  while (low <= high)
  {
    int mid = low + ((high - low) / 2);

    if (a[mid].x >= left_range)
      high = mid - 1;
    else //if(a[mid]<i)
      low = mid + 1;
  }

  return high + 1;
}

/**
\brief  search for the upper bound (right range)
\param  a           - array, sorted by .x
\param  length      - array size
\param  right_range - value to search for
\return index of the last element with a[i].x <= right_range, or -1 if none
*/
static inline int binarySearchForRightRange(__global const int2* a, int length, int right_range)
{
  if (a[0].x > right_range)
    return -1;

  int low  = 0;
  int high = length - 1;

  while (low <= high)
  {
    int mid = low + ((high - low) / 2);

    if (a[mid].x > right_range)
      high = mid - 1;
    else //if(a[mid]<i)
      low = mid + 1;
  }

  return low - 1;
}

/**
\brief  perform material id remap for instanced objects;
\param  a_mId               - input old material id
\param  a_instId            - input instance id
\param  in_remapInst        - array/table that maps instance id to remap list id
\param  a_instTabSize       - max instance id / size of 'in_remapInst' array
\param  in_allMatRemapLists - all remap lists packed in to single array
\param  in_remapTable       - array/table that store offset inside 'in_allMatRemapLists' for each remap list which id we got from 'in_remapInst'
\param  a_remapTableSize    - size of 'in_remapTable' array
\return new material id
*/
static inline int remapMaterialId(int a_mId, int a_instId,
                                  __global const int* in_remapInst, int a_instTabSize,
                                  __global const int* in_allMatRemapLists,
                                  __global const int2* in_remapTable, int a_remapTableSize)
{
  // missing tables or out-of-range ids: keep the original material id
  if (a_mId < 0 || a_instId < 0 || a_instId >= a_instTabSize || in_remapInst == 0 || in_allMatRemapLists == 0 || in_remapTable == 0)
    return a_mId;

  const int remapListId = in_remapInst[a_instId];
  if(remapListId < 0 || remapListId >= a_remapTableSize)
    return a_mId;

  const int2 offsAndSize = in_remapTable[remapListId]; // (.x = offset into in_allMatRemapLists, .y = number of (from,to) pairs)

  // the list is an array of (from,to) pairs sorted by 'from'; the linear scan
  // that used to live here was replaced by this lower-bound binary search
  int low  = 0;
  int high = offsAndSize.y - 1;

  while (low <= high)
  {
    const int mid         = low + ((high - low) / 2);
    const int idRemapFrom = in_allMatRemapLists[offsAndSize.x + mid * 2 + 0];

    if (idRemapFrom >= a_mId)
      high = mid - 1;
    else //if(a[mid]<i)
      low = mid + 1;
  }

  if (high+1 < offsAndSize.y)
  {
    const int idRemapFrom = in_allMatRemapLists[offsAndSize.x + (high + 1) * 2 + 0];
    const int idRemapTo   = in_allMatRemapLists[offsAndSize.x + (high + 1) * 2 + 1];
    const int res         = (idRemapFrom == a_mId) ? idRemapTo : a_mId; // remap only on exact match
    return res;
  }
  else
    return a_mId;
}

#define AO_RAYS_PACKED 4

// Pack an RGB shadow/transparency value in [0,1] into 16 bits per channel.
// NOTE(review): no clamping -- components outside [0,1] wrap on the ushort
// cast; presumably callers guarantee the range. Confirm before reuse.
static inline ushort4 compressShadow(float3 shadow)
{
  ushort4 shadowCompressed;

  shadowCompressed.x = (ushort)(65535.0f * shadow.x);
  shadowCompressed.y = (ushort)(65535.0f * shadow.y);
  shadowCompressed.z = (ushort)(65535.0f * shadow.z);
  shadowCompressed.w = 0;

  return shadowCompressed;
}

// Inverse of compressShadow: 16-bit channels back to floats in [0,1].
static inline float3 decompressShadow(ushort4 shadowCompressed)
{
  const float invNormCoeff = 1.0f / 65535.0f;
  return invNormCoeff*make_float3((float)shadowCompressed.x, (float)shadowCompressed.y, (float)shadowCompressed.z);
}

#define SPLIT_DL_BY_GRAMMAR true

//#define SBDPT_DEBUG_SPLIT 0
//#define SBDPT_DEBUG_DEPTH 4
//#define SBDPT_CHECK_BOUNCE 4
//#define SBDPT_INDIRECT_ONLY (void)

#endif
fixed.c
#include <stdio.h>

// The loop counter lives at file scope so it can be declared threadprivate:
// every OpenMP thread gets its own private copy, so all threads can run the
// loop below concurrently without racing on the induction variable.
static int i, counter;
#pragma omp threadprivate(i)

int main()
{
    // NOTE: this is a plain "parallel" region, not a "parallel for"
    // worksharing loop -- EVERY thread executes all 100000 iterations,
    // so the printed result is 100000 * <number of threads>.
#pragma omp parallel
    for (i = 0; i < 100000; i++)
        // the shared counter increment is made race-free with "atomic"
#pragma omp atomic
        counter++;

    printf("counter=%d\n", counter);
    return 0;
}
assign_1.c
// // Created by nick on 1/30/18. // #include "limits.h" #include "stdio.h" #include "stdlib.h" #include "memory.h" #include "omp.h" #include <sys/time.h> // ======== Defines for the entire project ========= // Getting the value of the macro as a string #define STR(name) #name #define MACRO_VALUE(name) STR(name) // Define the number of histogram bins here #define HIST_BINS 10 // Define the number of threads to use #define THREADS 12 // Define the chunk size #define CHUNK 8 // Define the scheduling type #define SCHED_TYPE dynamic // Define the scheduling to use #define SCHEDULE schedule(SCHED_TYPE, CHUNK) // Define the scheduling value as a string #define SCHED_VALUE MACRO_VALUE(SCHEDULE) // Const defines for when no parameters are given const int N_global = 10000; const int M_global = 1000; // gets the current time in seconds with microsecond precision double get_time() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec * 1e-6; } // Initializes a matrix of size NxN with values of [0, M) void init(int N, int M, int A[N][N]) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[i][j] = rand() % M; } } } // Prints every value inside of a matrix void print_matrix(int N, int matrix[N][N]) { for (int j = 0; j < N; j++) { for (int i = 0; i < N; i++) { printf("%d\t", matrix[i][j]); } printf("\n"); } } // Prints every value inside of a histogram void print_histogram(int hist[]) { int sum = 0; for (int i = 0; i < HIST_BINS; i++) { sum += hist[i]; printf("%d\t", hist[i]); } printf("Sum: %d\n", sum); } // Testing macros used so that the main void doesn't have so much clutter #define PERFORM_TEST_MAX(function) \ initialClock = get_time(); \ max = function(N, matrix); \ executionTime = (get_time() - initialClock) * 1000.f; \ printf("Found the max for %s: %d in %.2f ms\n", #function, max, executionTime); #define PERFORM_TEST_BINS(function) \ initialClock = get_time(); \ function(N, M, hist, matrix); \ executionTime = 
(get_time() - initialClock) * 1000.0f; \ printf("Created the histogram with %s in %.2f ms\n", #function, executionTime); \ print_histogram(hist); \ memset(hist, 0, sizeof(hist)); // Serial: No parallel at all, just brute force it on one thread int find_matrix_max_s(int N, int matrix[N][N]); void fill_bins_s(int N, int M, int hist[], int matrix[N][N]); // Parallel 1: Manual decomposition int find_matrix_max_p1(int N, int matrix[N][N]); void fill_bins_p1(int N, int M, int hist[], int matrix[N][N]); // Parallel 2: Use "for" construct without "reduction" clause int find_matrix_max_p2(int N, int matrix[N][N]); void fill_bins_p2(int N, int M, int hist[], int matrix[N][N]); // Parallel 3: Use "for" construct with "reduction" clause int find_matrix_max_p3(int N, int matrix[N][N]); void fill_bins_p3(int N, int M, int hist[], int matrix[N][N]); int main(int argc, char *argv[]) { int N, M; // Gather the size of matrix (N) and max possible value (M) if (argc < 3) { // Use the globals when the arguments weren't passed right N = N_global; M = M_global; } else { N = atoi(argv[1]); M = atoi(argv[2]); } // Setup random generator srand(1 << 12); // Generate the matrix int (*matrix)[N] = malloc(sizeof(int[N][N])); init(N, M, matrix); // Create the histogram array int hist[HIST_BINS]; memset(hist, 0, sizeof(hist)); //print_matrix(N, matrix); // Things used by the macro double initialClock, executionTime; // used for timing int max; // used for storing the max printf("Running the tests with %d thread(s) and %s\n", THREADS, SCHED_VALUE); // Perform the "find the max" tests PERFORM_TEST_MAX(find_matrix_max_s); PERFORM_TEST_MAX(find_matrix_max_p1); PERFORM_TEST_MAX(find_matrix_max_p2); PERFORM_TEST_MAX(find_matrix_max_p3); // Perform the histogram tests PERFORM_TEST_BINS(fill_bins_s); PERFORM_TEST_BINS(fill_bins_p1); PERFORM_TEST_BINS(fill_bins_p2); PERFORM_TEST_BINS(fill_bins_p3); return 0; } // ============= Implementations ================= // Serial int find_matrix_max_s(int N, int 
matrix[N][N]) { int toReturn = INT_MIN; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) if (matrix[i][j] > toReturn) toReturn = matrix[i][j]; return toReturn; } void fill_bins_s(int N, int M, int hist[], int matrix[N][N]) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { hist[k]++; break; } } } } } // Parallel-1 int find_matrix_max_p1(int N, int matrix[N][N]) { int toReturn = INT_MIN; int num_threads; #pragma omp parallel shared(matrix) num_threads(THREADS) { int i, j, start, end; #pragma omp single { num_threads = omp_get_num_threads(); } int pID = omp_get_thread_num(); start = pID * N / num_threads; end = ((pID + 1) * N) / num_threads; for (i = start; i < end; i++) for (j = 0; j < N; j++) if (matrix[i][j] > toReturn) #pragma omp critical toReturn = matrix[i][j]; }; return toReturn; } void fill_bins_p1(int N, int M, int hist[], int matrix[N][N]) { int num_threads; #pragma omp parallel num_threads(THREADS) { int i, j, start, end; int local_hist[HIST_BINS]; memset(local_hist, 0, sizeof(local_hist)); #pragma omp single { num_threads = omp_get_num_threads(); } int pID = omp_get_thread_num(); start = pID * N / num_threads; end = ((pID + 1) * N) / num_threads; for (i = start; i < end; i++) { for (j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { local_hist[k]++; break; } } } } #pragma omp critical for (i = 0; i < HIST_BINS; i++) { hist[i] += local_hist[i]; } }; } // Parallel-2 int find_matrix_max_p2(int N, int matrix[N][N]) { int toReturn = INT_MIN; #pragma omp parallel for num_threads(THREADS) SCHEDULE for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) if (matrix[i][j] > toReturn) toReturn = matrix[i][j]; return toReturn; } void 
fill_bins_p2(int N, int M, int hist[], int matrix[N][N]) { #pragma omp parallel num_threads(THREADS) { int local_bins[HIST_BINS]; memset(local_bins, 0, sizeof(local_bins)); #pragma omp for nowait SCHEDULE for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { local_bins[k]++; break; } } } } #pragma omp critical for (int i = 0; i < HIST_BINS; i++) { hist[i] += local_bins[i]; } }; } // Parallel-3 int find_matrix_max_p3(int N, int matrix[N][N]) { int toReturn = INT_MIN; #pragma omp parallel for reduction(max:toReturn) num_threads(THREADS) SCHEDULE for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) if (matrix[i][j] > toReturn) toReturn = matrix[i][j]; return toReturn; } void fill_bins_p3(int N, int M, int hist[], int matrix[N][N]) { #pragma omp parallel for reduction(+:hist[:HIST_BINS]) num_threads(THREADS) SCHEDULE for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { hist[k]++; break; } } } } }
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef BUILDER_H_ #define BUILDER_H_ #include <algorithm> #include <cinttypes> #include <fstream> #include <functional> #include <type_traits> #include <utility> #include "command_line.h" #include "generator.h" #include "compressed_graph.h" #include "graph.h" #include "platform_atomics.h" #include "pvector.h" #include "reader.h" #include "timer.h" #include "util.h" /* GAP Benchmark Suite Class: BuilderBase Author: Scott Beamer Given arguements from the command line (cli), returns a built graph - MakeGraph() will parse cli and obtain edgelist and call MakeGraphFromEL(edgelist) to perform actual graph construction - edgelist can be from file (reader) or synthetically generated (generator) - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h) */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_, bool invert = true> class BuilderBase { typedef EdgePair<NodeID_, DestID_> Edge; typedef pvector<Edge> EdgeList; const CLBase &cli_; bool symmetrize_; bool needs_weights_; int64_t num_nodes_ = -1; public: explicit BuilderBase(const CLBase &cli) : cli_(cli) { symmetrize_ = cli_.symmetrize(); needs_weights_ = !std::is_same<NodeID_, DestID_>::value; } DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) { return e.u; } DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_> > e) { return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w); } NodeID_ FindMaxNodeID(const EdgeList &el) { NodeID_ max_seen = 0; #pragma omp parallel for reduction(max : max_seen) for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; max_seen = std::max(max_seen, e.u); max_seen = std::max(max_seen, (NodeID_) e.v); } return max_seen; } pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) { pvector<NodeID_> degrees(num_nodes_, 0); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; 
if (symmetrize_ || (!symmetrize_ && !transpose)) fetch_and_add(degrees[e.u], 1); if (symmetrize_ || (!symmetrize_ && transpose)) fetch_and_add(degrees[(NodeID_) e.v], 1); } return degrees; } static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) { pvector<SGOffset> sums(degrees.size() + 1); SGOffset total = 0; for (size_t n=0; n < degrees.size(); n++) { sums[n] = total; total += degrees[n]; } sums[degrees.size()] = total; return sums; } static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) { const size_t block_size = 1<<20; const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; pvector<SGOffset> local_sums(num_blocks); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset lsum = 0; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) lsum += degrees[i]; local_sums[block] = lsum; } pvector<SGOffset> bulk_prefix(num_blocks+1); SGOffset total = 0; for (size_t block=0; block < num_blocks; block++) { bulk_prefix[block] = total; total += local_sums[block]; } bulk_prefix[num_blocks] = total; pvector<SGOffset> prefix(degrees.size() + 1); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset local_total = bulk_prefix[block]; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) { prefix[i] = local_total; local_total += degrees[i]; } } prefix[degrees.size()] = bulk_prefix[num_blocks]; return prefix; } // Removes self-loops and redundant edges // Side effect: neighbor IDs will be sorted void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose, DestID_*** sq_index, DestID_** sq_neighs) { pvector<NodeID_> diffs(g.num_nodes()); DestID_ *n_start, *n_end; #pragma omp parallel for private(n_start, n_end) for (NodeID_ n=0; n < g.num_nodes(); n++) { if (transpose) { n_start = g.in_neigh(n).begin(); n_end = 
g.in_neigh(n).end(); } else { n_start = g.out_neigh(n).begin(); n_end = g.out_neigh(n).end(); } std::sort(n_start, n_end); DestID_ *new_end = std::unique(n_start, n_end); new_end = std::remove(n_start, new_end, n); diffs[n] = new_end - n_start; } pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs); *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]]; *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs); #pragma omp parallel for private(n_start) for (NodeID_ n=0; n < g.num_nodes(); n++) { if (transpose) n_start = g.in_neigh(n).begin(); else n_start = g.out_neigh(n).begin(); std::copy(n_start, n_start+diffs[n], (*sq_index)[n]); } } CSRGraph<NodeID_, DestID_, invert> SquishGraph( const CSRGraph<NodeID_, DestID_, invert> &g) { DestID_ **out_index, *out_neighs, **in_index, *in_neighs; SquishCSR(g, false, &out_index, &out_neighs); if (g.directed()) { if (invert) SquishCSR(g, true, &in_index, &in_neighs); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs, in_index, in_neighs); } else { return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs); } } /* Graph Bulding Steps (for CSR): - Read edgelist once to determine vertex degrees (CountDegrees) - Determine vertex offsets by a prefix sum (ParallelPrefixSum) - Allocate storage and set points according to offsets (GenIndex) - Copy edges into storage */ void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index, DestID_** neighs) { pvector<NodeID_> degrees = CountDegrees(el, transpose); pvector<SGOffset> offsets = ParallelPrefixSum(degrees); *neighs = new DestID_[offsets[num_nodes_]]; *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v; if (symmetrize_ || (!symmetrize_ && transpose)) (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] = 
GetSource(e);
      }
  }

  // Builds a CSR graph (and optionally its inverse) from an edge list.
  // Determines num_nodes_ if unknown, injects weights when required, and
  // times the construction.
  CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
    DestID_ **index = nullptr, **inv_index = nullptr;
    DestID_ *neighs = nullptr, *inv_neighs = nullptr;
    Timer t;
    t.Start();
    if (num_nodes_ == -1)
      num_nodes_ = FindMaxNodeID(el)+1;   // largest endpoint seen + 1
    if (needs_weights_)
      Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
    MakeCSR(el, false, &index, &neighs);
    // Inverse (incoming) CSR only needed for directed graphs with invert on
    if (!symmetrize_ && invert)
      MakeCSR(el, true, &inv_index, &inv_neighs);
    t.Stop();
    PrintTime("Build Time", t.Seconds());
    if (symmetrize_)
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
    else
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
                                                inv_index, inv_neighs);
  }

  // Top-level graph construction: read a (possibly serialized) graph from
  // file or synthesize one, then squish (remove gaps) before returning.
  CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
    CSRGraph<NodeID_, DestID_, invert> g;
    {  // extra scope to trigger earlier deletion of el (save memory)
      EdgeList el;
      if (cli_.filename() != "") {
        Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
        if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) {
          // Pre-built serialized graph: no edge list needed
          return r.ReadSerializedGraph();
        } else {
          el = r.ReadFile(needs_weights_);
        }
      } else if (cli_.scale() != -1) {
        Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
        el = gen.GenerateEL(cli_.uniform());
      }
      g = MakeGraphFromEL(el);
    }
    return SquishGraph(g);
  }

  // Computes the number of bytes needed to delta-encode one vertex's
  // neighbor list: 8 bytes header (degree + first neighbor), then one
  // vertexOffset per remaining neighbor, plus a full id when the delta to
  // the previous neighbor does not fit below MAX_OFFSET.
  // NOTE(review): *neighbor_list.begin() is read unconditionally — for a
  // zero-degree vertex this dereferences past-the-end; confirm callers
  // guarantee degree >= 1 or guard it.
  // NOTE(review): the escape slot is counted as sizeof(DestID_) here but
  // written as NodeID_ in applyDeltaCompress — sizes differ for weighted
  // graphs (DestID_ != NodeID_); verify.
  size_t computeRowEncodingBytes(const CSRGraph<NodeID_, DestID_, invert> &g,
                                 int64_t row, bool out_vertices) {
    size_t bytesToEncode = 8; // Starting bytes for degree of vertex and vertex number of start vertex
    auto neighbor_list = out_vertices? g.out_neigh(row) : g.in_neigh(row);
    DestID_ prev_vertex = *neighbor_list.begin();
    for(auto curr = (1 + neighbor_list.begin() ); curr < neighbor_list.end(); ++curr) {
      bytesToEncode += sizeof(vertexOffset);
      if (*curr - prev_vertex >= (DestID_)MAX_OFFSET) {
        // delta overflows the small offset: a full id follows the marker
        bytesToEncode += sizeof(DestID_);
      }
      prev_vertex = *curr;
    }
    return bytesToEncode;
  }

  // Fills rowLengths[n] with the encoded byte size of row n and returns the
  // total across all vertices (serial; parallel reduction left disabled).
  size_t computeBytesForDeltaGraph(const CSRGraph<NodeID_, DestID_, invert> &g,
                                   pvector<DestID_>& rowLengths,
                                   bool out_vertices) {
    size_t totalBytes = 0;
    //#pragma omp parallel for reduction(+ : totalBytes)
    for(int64_t n = 0; n < g.num_nodes(); ++n) {
      size_t bytes_for_row = computeRowEncodingBytes(g, n, out_vertices);
      rowLengths[n] = bytes_for_row;
      totalBytes += bytes_for_row;
    }
    return totalBytes;
  }

  // Writes the delta-compressed adjacency into freshly allocated arrays.
  // index[n] holds the BYTE offset of row n inside *neigh_ptr; each row is:
  //   [degree:NodeID_][first neighbor:DestID_][offsets...], where an offset
  //   equal to MAX_OFFSET is an escape marker followed by the full id.
  // Rows are independent, so the fill loop is parallel.
  void applyDeltaCompress(const CSRGraph<NodeID_, DestID_, invert> &g,
                          size_t totalBytes, const pvector<SGOffset> &offsets,
                          int64_t** index_ptr, DestID_** neigh_ptr,
                          bool out_vertices) {
    const int64_t vertices = g.num_nodes();
    *index_ptr = new int64_t[vertices + 1];
    *neigh_ptr = new DestID_[totalBytes];
    int64_t* index = *index_ptr;
    DestID_* neighs = *neigh_ptr;
    #pragma omp parallel for
    for(int64_t n = 0; n <= vertices; ++n) {
      index[n] = offsets[n];   // prefix-summed byte offsets
    }
    #pragma omp parallel for
    for(int64_t n = 0; n < vertices; ++n) {
      // auto end_ptr = (vertexOffset *)index[n+1];
      // index[n] is a byte offset, hence the integer-arithmetic cast
      DestID_* start_ptr = (DestID_ *)((uint64_t)neighs + index[n]);
      *start_ptr = out_vertices? (NodeID_) g.out_degree(n) : (NodeID_) g.in_degree(n);
      ++start_ptr;
      auto neighbor_list = out_vertices? g.out_neigh(n) : g.in_neigh(n);
      *start_ptr = *neighbor_list.begin();   // first neighbor stored verbatim
      ++start_ptr;
      auto vOffset_ptr = (vertexOffset *) start_ptr;
      DestID_ prev_vertex = *neighbor_list.begin();
      for(auto curr = (1 + neighbor_list.begin()); curr < neighbor_list.end(); ++curr) {
        const int64_t delta = (*curr - prev_vertex);
        if ( delta < MAX_OFFSET) {
          *vOffset_ptr = delta;
          ++vOffset_ptr;
        } else {
          // escape: marker then the uncompressed vertex id
          *vOffset_ptr = MAX_OFFSET;
          ++vOffset_ptr;
          auto write_ptr = (NodeID_ *)vOffset_ptr;
          *write_ptr = *curr;
          ++write_ptr;
          vOffset_ptr = (vertexOffset *)write_ptr;
        }
        prev_vertex = *curr;
      }
    }
  }

  // Builds a CSR graph, delta-compresses it (both directions for directed
  // graphs when invert is set), and reports sizes/compression ratio.
  // NOTE(review): when g.directed() && !invert, in_index/in_neighs are
  // passed to DeltaGraph uninitialized — confirm DeltaGraph tolerates this
  // or that the configuration cannot occur.
  DeltaGraph<NodeID_, DestID_, invert> MakeDeltaGraph() {
    CSRGraph<NodeID_, DestID_, invert> g = MakeGraph();
    const double conversion = 1024*1024;   // bytes -> MB
    double original_size = g.get_byte_size() / conversion;
    PrintData("Original size (MB)", original_size);
    Timer t;
    t.Start();
    int64_t *in_index, *out_index;
    DestID_ *out_neighs, *in_neighs;
    pvector<DestID_> rowLengths(g.num_nodes());
    size_t totalBytes = computeBytesForDeltaGraph(g, rowLengths, true);
    pvector<SGOffset> offsets = ParallelPrefixSum(rowLengths);
    applyDeltaCompress(g, totalBytes, offsets, &out_index, &out_neighs, true);
    if (g.directed()) {
      if (invert) {
        // rowLengths/offsets are reused for the incoming direction
        totalBytes = computeBytesForDeltaGraph(g, rowLengths, false);
        offsets = ParallelPrefixSum(rowLengths);
        applyDeltaCompress(g, totalBytes, offsets, &in_index, &in_neighs, false);
      }
      t.Stop();
      auto delta_g = DeltaGraph<NodeID_, DestID_, invert>(g.num_nodes(),
          g.num_edges(), out_index, out_neighs, in_index, in_neighs);
      double new_size = delta_g.get_byte_size() / conversion;
      PrintData("Delta size (MB)", new_size);
      PrintData("Compression ratio", new_size/original_size);
      PrintTime("Compress Time", t.Seconds());
      return delta_g;
    } else {
      t.Stop();
      auto delta_g = DeltaGraph<NodeID_, DestID_, invert>(g.num_nodes(),
          g.num_edges(), out_index, out_neighs);
      double new_size = delta_g.get_byte_size() / conversion;
      PrintData("Delta size (MB)", new_size);
      PrintData("Compression ratio", new_size/original_size);
      PrintTime("Compress Time", t.Seconds());
      return delta_g;
    }
  }

  // Relabels (and rebuilds) graph by order of decreasing degree
  static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    if (g.directed()) {
      std::cout << "Cannot relabel directed graph" << std::endl;
      std::exit(-11);
    }
    Timer t;
    t.Start();
    // Pair each vertex with its degree so sorting by pair sorts by degree
    typedef std::pair<int64_t, NodeID_> degree_node_p;
    pvector<degree_node_p> degree_id_pairs(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++)
      degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
    std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
              std::greater<degree_node_p>());
    pvector<NodeID_> degrees(g.num_nodes());
    pvector<NodeID_> new_ids(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      degrees[n] = degree_id_pairs[n].first;
      new_ids[degree_id_pairs[n].second] = n;   // old id -> new id
    }
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
    DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
    #pragma omp parallel for
    for (NodeID_ u=0; u < g.num_nodes(); u++) {
      // offsets[] doubles as a per-row write cursor here (safe: one row per u)
      for (NodeID_ v : g.out_neigh(u))
        neighs[offsets[new_ids[u]]++] = new_ids[v];
      std::sort(index[new_ids[u]], index[new_ids[u]+1]);
    }
    t.Stop();
    PrintTime("Relabel", t.Seconds());
    return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
  }
};

#endif  // BUILDER_H_
target_data_array_extension.c
// -------------------------------------------------- // Check extends before // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // -------------------------------------------------- // Check extends after // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // END. 
#include <stdio.h> #define BEFORE 0 #define AFTER 1 #define SIZE 100 #if EXTENDS == BEFORE # define SMALL_BEG (SIZE-2) # define SMALL_END SIZE # define LARGE_BEG 0 # define LARGE_END SIZE #elif EXTENDS == AFTER # define SMALL_BEG 0 # define SMALL_END 2 # define LARGE_BEG 0 # define LARGE_END SIZE #else # error EXTENDS undefined #endif #define SMALL_SIZE (SMALL_END-SMALL_BEG) #define LARGE_SIZE (LARGE_END-LARGE_BEG) #define SMALL SMALL_BEG:SMALL_SIZE #define LARGE LARGE_BEG:LARGE_SIZE int main() { int arr[SIZE]; // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[SMALL_BEG], SMALL_SIZE * sizeof arr[0]); // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[LARGE_BEG], LARGE_SIZE * sizeof arr[0]); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: arr[LARGE]) { #pragma omp target data map(present, tofrom: arr[SMALL]) ; } // CHECK: arr is present fprintf(stderr, "arr is present\n"); // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes) // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes) // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer ('present' map type modifier). // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(alloc: arr[SMALL]) { #pragma omp target data map(present, tofrom: arr[LARGE]) ; } // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
GB_unop__isfinite_bool_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__isfinite_bool_fp32
// op(A') function:  GB_unop_tran__isfinite_bool_fp32

// C type:   bool
// A type:   float
// cast:     float cij = (aij)
// unaryop:  cij = isfinite (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isfinite (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    float aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (aij) ;      \
    Cx [pC] = isfinite (z) ;        \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__isfinite_bool_fp32
(
    bool *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__isfinite_bool_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. 
% % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o source_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'source_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'source_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the source_image. Typically used for repeated tiling % of the source_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for canvas preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. ** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... 
** gamma = Sa+Da-Sa*Da; ** gamma = 1 - QuantumScale*alpha * QuantumScale*beta; ** opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma ** ** The above SVG definitions also define that Mathematical Composition ** methods should use a 'Over' blending mode for Alpha Channel. ** It however was not applied for composition modes of 'Plus', 'Minus', ** the modulus versions of 'Add' and 'Subtract'. ** ** ** Mathematical operator changes to be applied from IM v6.7... ** ** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed ** 'ModulusAdd' and 'ModulusSubtract' for clarity. ** ** 2/ All mathematical compositions work as per the SVG specification ** with regard to blending. This now includes 'ModulusAdd' and ** 'ModulusSubtract'. ** ** 3/ When the special channel flag 'sync' (syncronize channel updates) ** is turned off (enabled by default) then mathematical compositions are ** only performed on the channels specified, and are applied ** independantally of each other. In other words the mathematics is ** performed as 'pure' mathematical operations, rather than as image ** operations. */ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa,const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */ composite->red=Atop(p->red,Sa,q->red,1.0); composite->green=Atop(p->green,Sa,q->green,1.0); composite->blue=Atop(p->blue,Sa,q->blue,1.0); if (q->colorspace == CMYKColorspace) composite->index=Atop(p->index,Sa,q->index,1.0); } /* What is this Composition method for? Can't find any specification! 
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { double SaSca; if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); SaSca=Sa*PerceptibleReciprocal(Sca); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif #if 0 /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static 
inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. 
*/ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity)); if ( (channel & RedChannel) != 0 ) composite->red=fabs((double) (p->red-q->red)); if ( (channel & GreenChannel) != 0 ) composite->green=fabs((double) (p->green-q->green)); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs((double) (p->blue-q->blue)); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs((double) (p->index-q->index)); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da*PerceptibleReciprocal(Dca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ((channel & AlphaChannel) != 0) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ((channel & RedChannel) != 0) composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0, QuantumScale*q->red,1.0); if ((channel & GreenChannel) != 0) composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0, QuantumScale*q->green,1.0); if ((channel & BlueChannel) != 0) composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0, QuantumScale*q->blue,1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0, QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca+Dca) < QuantumRange) return(0.0); else return(1.0); } static inline void CompositeHardMix(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardMix(p->red*Sa,q->red*Da); composite->green=gamma*HardMix(p->green*Sa,q->green*Da); composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardMix(p->index*Sa,q->index*Da); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,double *hue,double *chroma,double *luma) { double b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (double *) NULL); assert(chroma != (double *) NULL); assert(luma != (double *) NULL); r=(double) red; g=(double) green; b=(double) blue; max=MagickMax(r,MagickMax(g,b)); c=max-(double) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == (MagickRealType) max) h=fmod((g-b)/c+6.0,6.0); else if (green == (MagickRealType) max) h=((b-r)/c)+2.0; else if (blue == (MagickRealType) max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa, const MagickRealType magick_unused(q),const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*Da); } static inline void CompositeIn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*Da; composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*In(p->red,Sa,q->red,Da); composite->green=gamma*In(p->green,Sa,q->green,Da); 
composite->blue=gamma*In(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*In(p->index,Sa,q->index,Da);
}

/*
  Lighten: keep whichever channel is larger, alpha-blending the winner over
  the loser.
*/
static inline MagickRealType Lighten(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  if (p > q)
    return(MagickOver_(p,alpha,q,beta));  /* src-over */
  return(MagickOver_(q,beta,p,alpha));  /* dst-over */
}

static inline void CompositeLighten(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Lighten is also equivalent to a 'Maximum' method
    OR a greyscale version of a binary 'And'
    OR the 'Union' of pixel sets.
  */
  double
    gamma;

  if ( (channel & SyncChannels) != 0 ) {
    composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
    gamma=1.0-QuantumScale*composite->opacity;
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity);
    composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity);
    composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=MagickMin(p->opacity,q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=MagickMax(p->red,q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=MagickMax(p->green,q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=MagickMax(p->blue,q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=MagickMax(p->index,q->index);
  }
}

static inline void CompositeLightenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Select the pixel based on the intensity level.
    If 'Sync' flag select whole pixel based on alpha weighted intensity.
    Otherwise use Intensity only, but restrict copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;
    Da=1.0-QuantumScale*q->opacity;
    /* copy whichever whole pixel has the greater alpha-weighted intensity */
    *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q))
                 ? *p : *q;
  }
  else {
    /* from_p: nonzero when the source pixel is the more intense one */
    int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q));
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity = from_p ? p->opacity : q->opacity;
    if ( (channel & RedChannel) != 0 )
      composite->red = from_p ? p->red : q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green = from_p ? p->green : q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue = from_p ? p->blue : q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index = from_p ? p->index : q->index;
  }
}

#if 0
/* Retained for reference only: LinearDodge reduces to a plain sum, so the
   composite below inlines it. */
static inline MagickRealType LinearDodge(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearDodge: simplifies to a trivial formula
    f(Sc,Dc) = Sc + Dc
    Dca' = Sca + Dca
  */
  return(Sca+Dca);
}
#endif

/*
  LinearDodge composition: Dca' = Sca + Dca with SVG 'over' alpha blending.
*/
static inline void CompositeLinearDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*(p->red*Sa+q->red*Da);
  composite->green=gamma*(p->green*Sa+q->green*Da);
  composite->blue=gamma*(p->blue*Sa+q->blue*Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*(p->index*Sa+q->index*Da);
}

static inline MagickRealType LinearBurn(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearBurn: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:
      f(Sc,Dc) = Sc + Dc - 1
  */
  /* premultiplied-alpha expansion of Sc+Dc-1: Dca' = Sca + Dca - Sa*Da */
  return(Sca+Dca-Sa*Da);
}

static inline void CompositeLinearBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a near-zero combined alpha */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType LinearLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return(Dca+2*Sca-1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:
      f(Sc,Dc) = Dc + 2*Sc - 1
    (returned here in its premultiplied-alpha form).
  */
  return((Sca-Sa)*Da+Sca+Dca);
#endif
}

static inline void CompositeLinearLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...

       f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

    Where the arguments A,B,C,D are (currently) passed to composite as
    a comma separated 'geometry' string in the "compose:args" image artifact.

       A = a->rho,   B = a->sigma,  C = a->xi,  D = a->psi

    Applying the SVG transparency formula (see above), we get...

       Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

       Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
         Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  const GeometryInfo *args, MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma);
    composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da,args);
    composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,
      QuantumScale*q->green*Da,Da,args);
    composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da,args);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,
        QuantumScale*q->index*Da,Da,args);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
  }
}

static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    /*
      NOTE: "Plus" does not use 'over' alpha-blending but uses a
      special 'plus' form of alpha-blending. It is the ONLY mathematical
      operator to do this. This is what makes it different to the
      otherwise equivalent "LinearDodge" composition method.

      Note however that color channels are still affected by the alpha
      channel as a result of the blending, making it just as useless for
      independent channel maths, just like all other mathematical composition
      methods.

      As such the removal of the 'sync' flag, is still a useful convention.
      The MagickPixelCompositePlus() function is defined in
      "composite-private.h" so it can also be used for Image Blending.
    */
    MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=p->opacity+q->opacity-QuantumRange;
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red+q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green+q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue+q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index+q->index;
  }
}

static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination

       f(Sc,Dc) = Sc - Dc

    The expression returned below is the premultiplied-alpha expansion of
    that formula: Dca' = Sca + Dca - 2*Dca*Sa (the Da terms cancel, which
    is why Da is unused).
  */
  magick_unreferenced(Da);

  return(Sca+Dca-2*Dca*Sa);
}

static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
    composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
    composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-(Sa-Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red-q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green-q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue-q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index-q->index;
  }
}

/*
  ModulusAdd: add the two channels, wrapping the sum back into
  [0,QuantumRange], then alpha-blend source and destination contributions.
*/
static inline MagickRealType ModulusAdd(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p+q;
  /* wrap (modulus) the sum into quantum range */
  while (pixel > QuantumRange)
    pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusAdd(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Sa,
      Da;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* NOTE(review): gamma is computed here but never applied to the channel
       results below -- confirm whether that is intentional. */
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusAdd(p->red,Sa,q->red,Da);
    composite->green=ModulusAdd(p->green,Sa,q->green,Da);
    composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
  }
}

/*
  ModulusSubtract: subtract the channels, wrapping the difference back into
  [0,QuantumRange], then alpha-blend source and destination contributions.
*/
static inline MagickRealType ModulusSubtract(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p-q;
  /* wrap (modulus) the difference into quantum range */
  while (pixel > QuantumRange)
    pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma = RoundToUnity(Sa+Da-Sa*Da);
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* NOTE(review): gamma is computed here but never applied to the channel
       results below -- same pattern as CompositeModulusAdd; confirm. */
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
    composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
    composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
  }
}

/*
  Multiply: SVG multiply blend, Dca' = Sca*Dca + Sca*(1-Da) + Dca*(1-Sa).
*/
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* guard against division by a near-zero combined alpha */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
    composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,
        QuantumScale*q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Sa*Da);
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumScale*p->red*q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumScale*p->green*q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumScale*p->blue*q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumScale*p->index*q->index;
  }
}

/*
  Out: keep the source only where the destination is transparent
  (Porter-Duff 'src-out'): Dca' = Sa*Sc*(1-Da).  The destination color q
  is unused.
*/
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  magick_unreferenced(q);

  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);  /* 'out' alpha: source minus the overlap */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca,const
  MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the
    Softlight function, producing very similar results.

       f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  /* transparent destination: pass the source through unchanged */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);
  return(Dca*Dca*(Sa-2.0*Sca)*PerceptibleReciprocal(Da)+Sca*(2.0*Dca+1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a near-zero combined alpha */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

       f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ?
                  2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a near-zero combined alpha */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen:  A negated multiply

       f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* fold the QuantumScale normalization into the alphas up front */
    Sa*=(MagickRealType) QuantumScale;
    Da*=(MagickRealType) QuantumScale; /* optimization */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
MagickEpsilon : gamma); composite->red=gamma*Screen(p->red*Sa,q->red*Da); composite->green=gamma*Screen(p->green*Sa,q->green*Da); composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Screen(p->index*Sa,q->index*Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Screen(Sa,Da)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange*Screen(QuantumScale*p->red, QuantumScale*q->red); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange*Screen(QuantumScale*p->green, QuantumScale*q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange*Screen(QuantumScale*p->blue, QuantumScale*q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange*Screen(QuantumScale*p->index, QuantumScale*q->index); } } static MagickRealType SoftLight(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da) { MagickRealType alpha, beta; alpha=Dca*PerceptibleReciprocal(Da); if ((2.0*Sca) < Sa) return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa)); if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0* alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa); return(beta); } beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa); return(beta); } static inline void CompositeSoftLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated

  Multiply difference by amount, if difference larger than threshold???
  What use this is is completely unknown.
  The Opacity calculation appears to be inverted  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  /* below threshold: keep the destination channel unchanged */
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

       f(Sc,Dc) = (2*Sc < 1) ?
                    1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  /* degenerate alphas: avoid the divisions below */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)*PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+
      Dca*(1.0-Sa));
  return(Dca*Sa*Sa*PerceptibleReciprocal(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
    (1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* guard against division by a near-zero combined alpha */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Xor: Porter-Duff 'xor' -- keep each image only where the other is
  transparent: Dca' = Sca*(1-Da) + Dca*(1-Sa).
*/
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/*
  CompositeImage() composites the source image over the canvas image at the
  given offset using the default channel set; thin wrapper over
  CompositeImageChannel().  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *source_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
    x_offset,y_offset);
  return(status);
}

/*
  CompositeImageChannel() composites the 'composite' image onto 'image' at
  (x_offset,y_offset) with the given operator, restricted to the requested
  channels.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    clip_to_self,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Prepare composite image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite != (Image *) NULL);
  assert(composite->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /* work on a private clone so the caller's composite image is untouched */
  source_image=CloneImage(composite,0,0,MagickTrue,exception);
  if (source_image == (const Image *) NULL)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  (void) SetImageColorspace(source_image,image->colorspace);
  GetMagickPixelPacket(image,&zero);
  /* defaults; several operators override these from "compose:args" below */
  canvas_image=(Image *) NULL;
  amount=0.5;
  canvas_dissolve=1.0;
  clip_to_self=MagickTrue;
  percent_luma=100.0;
  percent_chroma=100.0;
  source_dissolve=1.0;
  threshold=0.05f;
  switch (compose)
  {
    case ClearCompositeOp:
    case SrcCompositeOp:
    case InCompositeOp:
    case SrcInCompositeOp:
    case OutCompositeOp:
    case SrcOutCompositeOp:
    case DstInCompositeOp:
    case DstAtopCompositeOp:
    {
      /*
        Modify canvas outside the overlaid region.
      */
      clip_to_self=MagickFalse;
      break;
    }
    case OverCompositeOp:
    {
      if (image->matte != MagickFalse)
        break;
      if (source_image->matte != MagickFalse)
        break;
      /* both images fully opaque: 'over' degenerates to 'copy', so fall
         through to the CopyCompositeOp fast path below */
    }
    case CopyCompositeOp:
    {
      /*
        Fast path: when the source lies entirely inside the canvas, rows can
        be copied verbatim without per-pixel blending.  Any rejected case
        simply falls back to the general per-pixel path.
        NOTE(review): '>=' also sends the exact-fit case
        (x_offset+columns == image->columns) to the general path; correct
        but conservative -- confirm intent.
      */
      if ((x_offset < 0) || (y_offset < 0))
        break;
      if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns)
        break;
      if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows)
        break;
      status=MagickTrue;
      source_view=AcquireVirtualCacheView(source_image,exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(source_image,image,source_image->rows,1)
#endif
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const IndexPacket
          *source_indexes;

        register const PixelPacket
          *p;

        register IndexPacket
          *indexes;

        register PixelPacket
          *q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
          source_image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        source_indexes=GetCacheViewVirtualIndexQueue(source_view);
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        /* bulk-copy the pixel row (and index row for CMYK/colormapped) */
        (void) memcpy(q,p,source_image->columns*sizeof(*p));
        if ((indexes != (IndexPacket *) NULL) &&
            (source_indexes != (const IndexPacket *) NULL))
          (void) memcpy(indexes,source_indexes,
            source_image->columns*sizeof(*indexes));
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_CompositeImage)
#endif
            proceed=SetImageProgress(image,CompositeImageTag,
              (MagickOffsetType) y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      source_view=DestroyCacheView(source_view);
      image_view=DestroyCacheView(image_view);
      source_image=DestroyImage(source_image);
      return(status);
    }
    case CopyOpacityCompositeOp:
    case ChangeMaskCompositeOp:
    {
      /*
        Modify canvas outside the overlaid region and require an alpha
        channel to exist, to add transparency.
      */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      clip_to_self=MagickFalse;
      break;
    }
    case BlurCompositeOp:
    {
      CacheView
        *canvas_view,
        *source_view;

      MagickPixelPacket
        pixel;

      MagickRealType
        angle_range,
        angle_start,
        height,
        width;

      ResampleFilter
        *resample_filter;

      SegmentInfo
        blur;

      /*
        Blur Image by resampling.

        Blur Image dictated by an overlay gradient map:
          X = red_channel;  Y = green_channel;
          compose:args = x_scale[,y_scale[,angle]].
      */
      canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      if (canvas_image == (Image *) NULL)
        {
          source_image=DestroyImage(source_image);
          return(MagickFalse);
        }
      /*
        Gather the maximum blur sigma values from user.
      */
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      /* the x-scale (rho) is mandatory; warn and bail out without it */
      if ((flags & WidthValue) == 0)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value);
          source_image=DestroyImage(source_image);
          canvas_image=DestroyImage(canvas_image);
          return(MagickFalse);
        }
      /*
        Users input sigma now needs to be converted to the EWA ellipse size.
        The filter defaults to a sigma of 0.5 so to make this match the
        users input the ellipse size needs to be doubled.
*/ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter,1.0); /* do the variable blurring of each pixel in image */ pixel=zero; source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict r; register IndexPacket *magick_restrict canvas_indexes; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(p), blur.y1*QuantumScale*GetPixelGreen(p), blur.x2*QuantumScale*GetPixelRed(p), blur.y2*QuantumScale*GetPixelGreen(p)); (void) ResamplePixelColor(resample_filter,(double) x_offset+x,(double) y_offset+y,&pixel); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } 
resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view, *source_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *magick_restrict canvas_indexes; register PixelPacket *magick_restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=((MagickRealType) image->columns-1)/2.0; else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+geometry_info.xi); else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=((MagickRealType) image->rows-1)/2.0; else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. */ offset.x=(double) ((horizontal_scale*(GetPixelRed(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0)); offset.y=(double) ((vertical_scale*(GetPixelGreen(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0)); status=InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p))); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. 
*/ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0 ) { canvas_dissolve=1.0; clip_to_self=MagickTrue; } } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0) clip_to_self=MagickTrue; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. 
This Composition method is deprecated */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(image,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsMagickTrue(value); /* Composite image. */ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateCompositeImage(image,channel,compose,source_image, x_offset,y_offset,canvas_dissolve,source_dissolve,exception); if (status != MagickFalse) return(status); #endif status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(source_image,&zero); source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); source_indexes=GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image,&source); GetMagickPixelPacket(image,&canvas); hue=0.0; chroma=0.0; luma=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } canvas.red=(MagickRealType) GetPixelRed(q); canvas.green=(MagickRealType) GetPixelGreen(q); canvas.blue=(MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity=(MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index=(MagickRealType) GetPixelIndex(indexes+x); if (image->colorspace == CMYKColorspace) { canvas.red=(MagickRealType) QuantumRange-canvas.red; canvas.green=(MagickRealType) QuantumRange-canvas.green; canvas.blue=(MagickRealType) QuantumRange-canvas.blue; canvas.index=(MagickRealType) QuantumRange-canvas.index; } /* Handle canvas modifications outside overlaid region. 
*/ composite=canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve* (QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(source_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto canvas. 
*/ source.red=(MagickRealType) GetPixelRed(p); source.green=(MagickRealType) GetPixelGreen(p); source.blue=(MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity=(MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetPixelIndex(source_indexes+ x-x_offset); if (source_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&canvas, canvas.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas,canvas.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&canvas,&composite); break; } case DstInCompositeOp: { CompositeIn(&canvas,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&canvas,&composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&canvas,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas,&source,&composite); break; } case XorCompositeOp: { CompositeXor(&source,&canvas,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source,&canvas,channel,&composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source,&canvas,channel,&composite); break; } case MinusSrcCompositeOp: { 
CompositeMinus(&canvas,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&canvas,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&canvas,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&canvas,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&canvas,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&canvas,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&canvas,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&canvas,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&canvas,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&canvas,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&canvas,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&canvas,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&canvas,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&canvas,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&canvas,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&canvas,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&canvas,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&canvas,&composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source,&canvas,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. 
*/ CompositeHardLight(&canvas,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&canvas,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&canvas,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&canvas,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&canvas,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&canvas,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&canvas) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&canvas,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&canvas, (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange- canvas.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&canvas, canvas_dissolve,&composite); break; } case StereoCompositeOp: { canvas.red=(MagickRealType) GetPixelRed(p); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&canvas,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red,&composite.green, &composite.blue); break; } case HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == 
TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans, &sans,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue=source.blue; break; } case 
CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); else composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=QuantumRange-source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels+source_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. 
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Validate arguments; a missing texture is not an error, just a no-op
    failure return.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Work on a private clone of the texture: it is converted to the canvas
    colorspace and given the tile virtual pixel method so reads past its
    edges wrap around (presumably what makes the offset tiling below safe
    near the image borders).
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        General path: the compose operator (or the presence of an alpha
        channel on either side) requires real blending, so tile by calling
        CompositeImage() once per tile position.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* tile_offset shifts the tiling origin for both axes. */
          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Final 100% progress tick before returning from the general path. */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).

    Fast path: no blending is needed (plain copy, or over-composite with no
    alpha anywhere), so each canvas row is filled by memcpy'ing the matching
    texture row, one texture-width chunk at a time.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,texture_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    /* status is shared across OpenMP threads; bail out of remaining rows. */
    if (status == MagickFalse)
      continue;
    /*
      Read one texture row (wrapped vertically via the modulus; horizontal
      wrap is handled by the tile virtual pixel method) and queue the
      corresponding canvas row for writing.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      /* Clip the last chunk so we never write past the canvas row. */
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      (void) memcpy(q,p,width*sizeof(*p));
      /*
        Colormap/black-channel indexes are copied only when both images are
        CMYK (the colorspace transform above normally makes them agree).
      */
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          (void) memcpy(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress callback is not thread-safe; serialize it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TextureImage)
#endif
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
GB_unaryop__abs_int8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int8_uint64
// op(A') function:  GB_tran__abs_int8_uint64

// C type:   int8_t
// A type:   uint64_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IABS (aij)

// The macros below parameterize the generic transpose template
// (GB_unaryop_transpose.c, included at the bottom of this file) as well as
// the apply kernel.

// type of the entries of the input matrix A
#define GB_ATYPE \
    uint64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// reference to the pth entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
// (GB_IABS is defined in GB.h; for an unsigned input the observable effect
// here is the int8_t cast performed by GB_CASTING below)
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_IABS ((int8_t) aij) to all anz entries of Ax, writing the
// results to Cx.  The loop is split statically across nthreads OpenMP
// threads.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), in which case the caller falls back to the generic worker.

GrB_Info GB_unop__abs_int8_uint64
(
    int8_t *restrict Cx,            // output array, anz entries
    const uint64_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the template GB_unaryop_transpose.c,
// which is specialized via the GB_* macros defined above.

GrB_Info GB_tran__abs_int8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
TimeCluster.h
/****************************************************************************** ** Copyright (c) 2015, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ /** * @file * This file is part of SeisSol. 
* * @author Alex Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer) * * @section LICENSE * Copyright (c) 2013-2015, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @section DESCRIPTION * LTS cluster in SeisSol. 
**/

#ifndef TIMECLUSTER_H_
#define TIMECLUSTER_H_

#ifdef USE_MPI
#include <mpi.h>
#include <list>
#endif

#include <Initializer/typedefs.hpp>
#include <SourceTerm/typedefs.hpp>
#include <utils/logger.h>
#include <Initializer/LTS.h>
#include <Initializer/tree/LTSTree.hpp>
#include <Kernels/Time.h>
#include <Kernels/Local.h>
#include <Kernels/Neighbor.h>
#include <Kernels/DynamicRupture.h>
#include <Kernels/Plasticity.h>
#include <Solver/FreeSurfaceIntegrator.h>
#include <Monitoring/LoopStatistics.h>
#include <Kernels/TimeCommon.h>

#ifdef ACL_DEVICE
#include <device.h>
#include <Solver/Pipeline/DrPipeline.h>
#endif

// forward declarations
namespace seissol {
  namespace time_stepping {
    class TimeCluster;
  }
  namespace kernels {
    class ReceiverCluster;
  }
}

/**
 * Time cluster, which represents a collection of elements having the same time step width.
 **/
class seissol::time_stepping::TimeCluster
{
public:
    //! cluster id on this rank
    const unsigned int m_clusterId;

    //! global cluster cluster id
    const unsigned int m_globalClusterId;

private:
    //! true if plasticity is enabled for this cluster
    bool usePlasticity;

    //! number of time steps
    unsigned long m_numberOfTimeSteps;

    /*
     * integrators
     */
    //! time kernel
    kernels::Time m_timeKernel;

    //! local kernel
    kernels::Local m_localKernel;

    //! neighbor kernel
    kernels::Neighbor m_neighborKernel;

    //! dynamic rupture kernel
    kernels::DynamicRupture m_dynamicRuptureKernel;

    /*
     * mesh structure
     */
    struct MeshStructure *m_meshStructure;

    /*
     * global data
     */
    //! global data structures
    GlobalData *m_globalDataOnHost{nullptr};
    GlobalData *m_globalDataOnDevice{nullptr};

#ifdef ACL_DEVICE
    //! handle to the accelerator device singleton
    device::DeviceInstance& device = device::DeviceInstance::getInstance();
    //! pipeline used to stage dynamic-rupture computations on the device
    dr::pipeline::DrPipeline drPipeline;
#endif

    /*
     * element data and mpi queues
     */
#ifdef USE_MPI
    //! pending copy region sends
    std::list< MPI_Request* > m_sendQueue;

    //! pending ghost region receives
    std::list< MPI_Request* > m_receiveQueue;
#endif

    //! LTS tree data of this cluster (cell data)
    seissol::initializers::TimeCluster* m_clusterData;
    //! LTS tree data of the dynamic rupture faces of this cluster
    seissol::initializers::TimeCluster* m_dynRupClusterData;
    //! variable layout of the LTS tree
    seissol::initializers::LTS* m_lts;
    //! variable layout of the dynamic rupture tree
    seissol::initializers::DynamicRupture* m_dynRup;

    //! time step width of the performed time step.
    double m_timeStepWidth;

    //! Mapping of cells to point sources
    sourceterm::CellToPointSourcesMapping const* m_cellToPointSources;

    //! Number of mapping of cells to point sources
    unsigned m_numberOfCellToPointSourcesMappings;

    //! Point sources
    sourceterm::PointSources const* m_pointSources;

    //! true if dynamic rupture faces are present
    bool m_dynamicRuptureFaces;

    //! compute parts for which FLOP counters are tracked separately
    enum ComputePart {
      LocalInterior = 0,
      NeighborInterior,
      DRNeighborInterior,
#ifdef USE_MPI
      LocalCopy,
      NeighborCopy,
      DRNeighborCopy,
#endif
      DRFrictionLawCopy,
      DRFrictionLawInterior,
      PlasticityCheck,
      PlasticityYield,
      NUM_COMPUTE_PARTS
    };

    //! non-zero FLOPs per compute part
    long long m_flops_nonZero[NUM_COMPUTE_PARTS];
    //! hardware FLOPs per compute part
    long long m_flops_hardware[NUM_COMPUTE_PARTS];

    //! Tv parameter for plasticity
    double m_tv;

    //! Relax time for plasticity
    double m_oneMinusIntegratingFactor;

    //! Stopwatch of TimeManager
    LoopStatistics* m_loopStatistics;
    //! LoopStatistics region ids of the measured regions
    unsigned m_regionComputeLocalIntegration;
    unsigned m_regionComputeNeighboringIntegration;
    unsigned m_regionComputeDynamicRupture;

    //! receivers assigned to this cluster (may be nullptr if none)
    kernels::ReceiverCluster* m_receiverCluster;

#ifdef USE_MPI
    /**
     * Receives the copy layer data from relevant neighboring MPI clusters.
     **/
    void receiveGhostLayer();

    /**
     * Sends the associated regions of the copy layer to relevant neighboring MPI clusters
     **/
    void sendCopyLayer();

#if defined(_OPENMP) && defined(USE_COMM_THREAD)
    /**
     * Inits Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread
     **/
    void initReceiveGhostLayer();

    /**
     * Inits Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread
     **/
    void initSendCopyLayer();

    /**
     * Waits until the initialization of the communication is finished.
     **/
    void waitForInits();
#endif

    /**
     * Tests for pending ghost layer communication.
     **/
    bool testForGhostLayerReceives();

    /**
     * Tests for pending copy layer communication.
     **/
    bool testForCopyLayerSends();
#endif

    /**
     * Writes the receiver output if applicable (receivers present, receivers have to be written).
     **/
    void writeReceivers();

    /**
     * Computes the source terms if applicable.
     **/
    void computeSources();

    /**
     * Computes dynamic rupture.
     **/
    void computeDynamicRupture( seissol::initializers::Layer& layerData );

    /**
     * Computes all cell local integration.
     *
     * This are:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     *
     * Remark: After this step the DOFs are only updated half with the boundary contribution
     *         of the neighborings cells missing.
     *
     * @param i_layerData layer (copy or interior) whose cells are integrated.
     **/
    void computeLocalIntegration( seissol::initializers::Layer& i_layerData );

    /**
     * Computes the contribution of the neighboring cells to the boundary integral.
     *
     * Remark: After this step (in combination with the local integration) the DOFs are at the next time step.
     * TODO: This excludes dynamic rupture contribution.
     *
     * @param i_layerData layer (copy or interior) whose cells are integrated.
     **/
    void computeNeighboringIntegration( seissol::initializers::Layer& i_layerData );

#ifndef ACL_DEVICE
    /**
     * Shared implementation of the neighboring integration, used for both copy and
     * interior layers. The plasticity computation is compiled in or out via the
     * template parameter, so the hot loop carries no runtime branch for it.
     *
     * @param i_layerData layer whose cells are integrated.
     * @return pair of (non-zero, hardware) FLOPs spent on plasticity.
     **/
    template<bool usePlasticity>
    std::pair<long, long> computeNeighboringIntegrationImplementation(seissol::initializers::Layer& i_layerData) {
      SCOREP_USER_REGION( "computeNeighboringIntegration", SCOREP_USER_REGION_TYPE_FUNCTION )

      m_loopStatistics->begin(m_regionComputeNeighboringIntegration);

      real* (*faceNeighbors)[4] = i_layerData.var(m_lts->faceNeighbors);
      CellDRMapping (*drMapping)[4] = i_layerData.var(m_lts->drMapping);
      CellLocalInformation* cellInformation = i_layerData.var(m_lts->cellInformation);
      PlasticityData* plasticity = i_layerData.var(m_lts->plasticity);
      real (*pstrain)[7 * NUMBER_OF_ALIGNED_BASIS_FUNCTIONS] = i_layerData.var(m_lts->pstrain);
      unsigned numberOTetsWithPlasticYielding = 0;

      kernels::NeighborData::Loader loader;
      loader.load(*m_lts, i_layerData);

      real *l_timeIntegrated[4];
      real *l_faceNeighbors_prefetch[4];

#ifdef _OPENMP
#pragma omp parallel for schedule(static) default(none) private(l_timeIntegrated, l_faceNeighbors_prefetch) shared(cellInformation, loader, faceNeighbors, pstrain, i_layerData, plasticity, drMapping) reduction(+:numberOTetsWithPlasticYielding)
#endif
      for( unsigned int l_cell = 0; l_cell < i_layerData.getNumberOfCells(); l_cell++ ) {
        auto data = loader.entry(l_cell);
        // evaluate the time prediction of the face neighbors; each OpenMP
        // thread uses its own slice of the shared integration buffer
        seissol::kernels::TimeCommon::computeIntegrals(m_timeKernel,
                                                       data.cellInformation.ltsSetup,
                                                       data.cellInformation.faceTypes,
                                                       m_subTimeStart,
                                                       m_timeStepWidth,
                                                       faceNeighbors[l_cell],
#ifdef _OPENMP
                                                       *reinterpret_cast<real (*)[4][tensor::I::size()]>(&(m_globalDataOnHost->integrationBufferLTS[omp_get_thread_num()*4*tensor::I::size()])),
#else
                                                       // NOTE(review): this branch references m_globalData, which is not
                                                       // declared in this class (members are m_globalDataOnHost/OnDevice);
                                                       // presumably stale -- confirm non-OpenMP builds compile.
                                                       *reinterpret_cast<real (*)[4][tensor::I::size()]>(m_globalData->integrationBufferLTS),
#endif
                                                       l_timeIntegrated);

#ifdef ENABLE_MATRIX_PREFETCH
#pragma message("the current prefetch structure (flux matrices and tDOFs is tuned for higher order and shouldn't be harmful for lower orders")
        l_faceNeighbors_prefetch[0] = (cellInformation[l_cell].faceTypes[1] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][1] : drMapping[l_cell][1].godunov;
        l_faceNeighbors_prefetch[1] = (cellInformation[l_cell].faceTypes[2] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][2] : drMapping[l_cell][2].godunov;
        l_faceNeighbors_prefetch[2] = (cellInformation[l_cell].faceTypes[3] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][3] : drMapping[l_cell][3].godunov;

        // fourth face's prefetches
        if (l_cell < (i_layerData.getNumberOfCells()-1) ) {
          l_faceNeighbors_prefetch[3] = (cellInformation[l_cell+1].faceTypes[0] != FaceType::dynamicRupture) ? faceNeighbors[l_cell+1][0] : drMapping[l_cell+1][0].godunov;
        } else {
          l_faceNeighbors_prefetch[3] = faceNeighbors[l_cell][3];
        }
#endif

        m_neighborKernel.computeNeighborsIntegral( data,
                                                   drMapping[l_cell],
#ifdef ENABLE_MATRIX_PREFETCH
                                                   l_timeIntegrated, l_faceNeighbors_prefetch
#else
                                                   l_timeIntegrated
#endif
                                                   );

        if constexpr (usePlasticity) {
          numberOTetsWithPlasticYielding += seissol::kernels::Plasticity::computePlasticity( m_oneMinusIntegratingFactor,
                                                                                            m_timeStepWidth,
                                                                                            m_tv,
                                                                                            m_globalDataOnHost,
                                                                                            &plasticity[l_cell],
                                                                                            data.dofs,
                                                                                            pstrain[l_cell] );
        }
#ifdef INTEGRATE_QUANTITIES
        seissol::SeisSol::main.postProcessor().integrateQuantities( m_timeStepWidth,
                                                                    i_layerData,
                                                                    l_cell,
                                                                    dofs[l_cell] );
#endif // INTEGRATE_QUANTITIES
      }

      const long long nonZeroFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_nonZero[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_nonZero[PlasticityYield];
      const long long hardwareFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_hardware[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_hardware[PlasticityYield];

      m_loopStatistics->end(m_regionComputeNeighboringIntegration, i_layerData.getNumberOfCells());

      return {nonZeroFlopsPlasticity, hardwareFlopsPlasticity};
    }
#endif // ACL_DEVICE

    //! Counts the FLOPs of the local integration of numberOfCells cells.
    void computeLocalIntegrationFlops(unsigned numberOfCells,
                                      CellLocalInformation const* cellInformation,
                                      long long& nonZeroFlops,
                                      long long& hardwareFlops);

    //! Counts the FLOPs of the neighboring integration (incl. dynamic rupture faces).
    void computeNeighborIntegrationFlops( unsigned numberOfCells,
                                          CellLocalInformation const* cellInformation,
                                          CellDRMapping const (*drMapping)[4],
                                          long long& nonZeroFlops,
                                          long long& hardwareFlops,
                                          long long& drNonZeroFlops,
                                          long long& drHardwareFlops );

    //! Counts the FLOPs of the dynamic rupture friction law evaluation.
    void computeDynamicRuptureFlops( seissol::initializers::Layer& layerData,
                                     long long& nonZeroFlops,
                                     long long& hardwareFlops );

    //! Fills the m_flops_nonZero / m_flops_hardware tables for all compute parts.
    void computeFlops();

    //! Update relax time for plasticity
    void updateRelaxTime() {
      m_oneMinusIntegratingFactor = (m_tv > 0.0) ? 1.0 - exp(-m_timeStepWidth / m_tv) : 1.0;
    }

  public:
    //! flags identifying if the respective part is allowed to be updated
    struct {
      bool localCopy;
      bool neighboringCopy;
      bool localInterior;
      bool neighboringInterior;
    } m_updatable;

#ifdef USE_MPI
    //! send true LTS buffers
    volatile bool m_sendLtsBuffers;
#endif

    //! reset lts buffers before performing time predictions
    volatile bool m_resetLtsBuffers;

    /* Sub start time of width respect to the next cluster; use 0 if not relevant, for example in GTS.
     * LTS requires to evaluate a partial time integration of the derivatives. The point zero in time refers to the derivation of the surrounding time derivatives, which
     * coincides with the last completed time step of the next cluster. The start/end of the time step is the start/end of this clusters time step relative to the zero point.
     *   Example:
     *   <verb>
     *                                              5 dt
     *   |-----------------------------------------------------------------------------------------| <<< Time stepping of the next cluster (Cn) (5x larger than the current).
     *   |                 |                 |                 |                 |                 |
     *   |*****************|*****************|+++++++++++++++++|                 |                 | <<< Status of the current cluster.
     *   |                 |                 |                 |                 |                 |
     *   |-----------------|-----------------|-----------------|-----------------|-----------------| <<< Time stepping of the current cluster (Cc).
     *   0                 dt               2dt               3dt               4dt               5dt
     *   </verb>
     *
     *   In the example above two clusters are illustrated: Cc and Cn. Cc is the current cluster under consideration and Cn the next cluster with respect to LTS terminology.
     *   Cn is currently at time 0 and provided Cc with derivatives valid until 5dt. Cc updated already twice and did its last full update to reach 2dt (== subTimeStart). Next
     *   computeNeighboringCopy is called to accomplish the next full update to reach 3dt (+++). Besides working on the buffers of own buffers and those of previous clusters,
     *   Cc needs to evaluate the time prediction of Cn in the interval [2dt, 3dt].
     */
    double m_subTimeStart;

    //! number of full updates the cluster has performed since the last synchronization
    unsigned int m_numberOfFullUpdates;

    //! simulation time of the last full update (this is a complete volume and boundary integration)
    double m_fullUpdateTime;

    //! final time of the prediction (derivatives and time integrated DOFs).
    double m_predictionTime;

    //! time of the next receiver output
    double m_receiverTime;

    /**
     * Constructs a new LTS cluster.
     *
     * @param i_clusterId id of this cluster with respect to the current rank.
     * @param i_globalClusterId global id of this cluster.
     * @param usePlasticity true if using plasticity.
     * @param i_meshStructure mesh structure of this cluster.
     * @param i_globalData global data (host and, if applicable, device).
     * @param i_clusterData LTS tree data of this cluster.
     * @param i_dynRupClusterData LTS tree data of the dynamic rupture faces.
     * @param i_lts variable layout of the LTS tree.
     * @param i_dynRup variable layout of the dynamic rupture tree.
     * @param i_loopStatistics loop statistics of the time manager.
     **/
    TimeCluster(unsigned int i_clusterId,
                unsigned int i_globalClusterId,
                bool usePlasticity,
                MeshStructure *i_meshStructure,
                CompoundGlobalData i_globalData,
                seissol::initializers::TimeCluster* i_clusterData,
                seissol::initializers::TimeCluster* i_dynRupClusterData,
                seissol::initializers::LTS* i_lts,
                seissol::initializers::DynamicRupture* i_dynRup,
                LoopStatistics* i_loopStatistics);

    /**
     * Destructor of a LTS cluster.
     * TODO: Currently prints only statistics in debug mode.
     **/
    ~TimeCluster();

    //! Returns the time step width currently used by this cluster.
    double timeStepWidth() const {
      return m_timeStepWidth;
    }

    //! Sets the time step width and updates the quantities derived from it
    //! (plasticity relax factor, dynamic rupture kernel).
    void setTimeStepWidth(double timestep) {
      m_timeStepWidth = timestep;
      updateRelaxTime();
      m_dynamicRuptureKernel.setTimeStepWidth(timestep);
    }

    /**
     * Adds a source to the cluster.
     *
     * @param i_meshId mesh id of the point of interest.
     **/
    void addSource( unsigned int i_meshId );

    /**
     * Sets the pointer to the cluster's point sources
     *
     * @param i_cellToPointSources Contains mappings of 1 cell offset to m point sources
     * @param i_numberOfCellToPointSourcesMappings Size of i_cellToPointSources
     * @param i_pointSources pointer to all point sources used on this cluster
     */
    void setPointSources( sourceterm::CellToPointSourcesMapping const* i_cellToPointSources,
                          unsigned i_numberOfCellToPointSourcesMappings,
                          sourceterm::PointSources const* i_pointSources );

    //! Assigns the receivers that belong to this cluster.
    void setReceiverCluster( kernels::ReceiverCluster* receiverCluster) {
      m_receiverCluster = receiverCluster;
    }

    /**
     * Set Tv constant for plasticity.
     */
    void setTv(double tv) {
      m_tv = tv;
      updateRelaxTime();
    }

#ifdef USE_MPI
    /**
     * Computes cell local integration of all cells in the copy layer and initiates the corresponding communication.
     * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request.
     *
     * Cell local integration is:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     *
     * @return true if the update (incl. communication requests), false if the update failed due to unfinished sends of copy data to MPI neighbors.
     **/
    bool computeLocalCopy();
#endif

    /**
     * Computes cell local integration of all cells in the interior.
     * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request.
     *
     * Cell local integration is:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     **/
    void computeLocalInterior();

#ifdef USE_MPI
    /**
     * Computes the neighboring contribution to the boundary integral for all cells in the copy layer.
     *
     * @return true if the update (incl. communication requests), false if the update failed due to missing data from neighboring ranks.
     **/
    bool computeNeighboringCopy();
#endif

    /**
     * Computes the neighboring contribution to the boundary integral for all cells in the interior.
     **/
    void computeNeighboringInterior();

    /**
     * Returns number of cells managed by this cluster.
     * @return Number of cells
     */
    long getNumberOfCells() const;

#if defined(_OPENMP) && defined(USE_MPI) && defined(USE_COMM_THREAD)
    /**
     * Tests for pending ghost layer communication, active when using communication thread
     **/
    void pollForGhostLayerReceives();

    /**
     * Polls for pending copy layer communication, active when using communication thread
     **/
    void pollForCopyLayerSends();

    /**
     * Start Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread
     **/
    void startReceiveGhostLayer();

    /**
     * start Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread
     **/
    void startSendCopyLayer();
#endif
};

#endif
HestonInnerLoop.h
#include <cmath>
#include <random>
#include <vector>

//Declare function -- calculates a heston european option price
double HestonInnerLoop(double S, double K, double r, double T, double IV,
                       int OptionType, double Kappa, double Theta, double Gamma,
                       double Rho, int ChunkSize, int M);

/**
 * Simulates ChunkSize Heston paths with M time steps each and returns the
 * UNDISCOUNTED SUM of the European option payoffs at maturity; the caller is
 * expected to discount and average (exp(-r*T) * Payoff / ChunkSize, see the
 * commented-out line at the bottom).
 *
 * @param S          spot price
 * @param K          strike
 * @param r          risk-free rate
 * @param T          maturity (years)
 * @param IV         initial variance v0
 * @param OptionType non-zero = put, zero = call
 * @param Kappa      mean-reversion speed of the variance process
 * @param Theta      long-run variance
 * @param Gamma      vol-of-vol
 * @param Rho        correlation between the two Brownian drivers
 * @param ChunkSize  number of simulated paths (should be even: antithetic pairs)
 * @param M          number of time steps per path
 */
double HestonInnerLoop(double S, double K, double r, double T, double IV,
                       int OptionType, double Kappa, double Theta, double Gamma,
                       double Rho, int ChunkSize, int M)
{
    // Degenerate inputs: no paths or no time steps means no payoff (the
    // original divided by M and indexed element [0] unconditionally).
    if (ChunkSize <= 0 || M <= 0) return 0.0;

    //Seed the RNG
    std::random_device rd;
    std::mt19937 gen(rd());
    std::normal_distribution<double> RNORM(0.0, 1.0);

    // Path storage on the heap, zero-initialized. The original used C-style
    // variable-length arrays with initializers (`double a[ChunkSize][M] = {}`),
    // which is not standard C++ and overflows the stack for realistic
    // ChunkSize*M. Layout is path-major: element (i, j) lives at i*M + j.
    const std::size_t cells = static_cast<std::size_t>(ChunkSize) * static_cast<std::size_t>(M);
    std::vector<double> StockPaths(cells, 0.0);
    std::vector<double> VolatilityPaths(cells, 0.0);
    std::vector<double> dW1(cells, 0.0);
    std::vector<double> dW2(cells, 0.0);
    auto at = [M](int i, int j) { return static_cast<std::size_t>(i) * M + j; };

    double Payoff = 0;

    //Calculate parameters
    double dt = T / M;
    double SqrtDt = std::sqrt(dt);
    double RhoCoef = std::sqrt(1 - Rho * Rho);  // Cholesky factor for the correlation
    int Offset = ChunkSize / 2;                 // first half fresh draws, second half antithetic

    // Fill out arrays with correlated gaussian random numbers. The second
    // half of the paths uses the NEGATED increments (true antithetic
    // variates) -- the original copied the draws unchanged, which merely
    // duplicated each path and provided no variance reduction.
    // NOTE(review): if ChunkSize is odd, the last path keeps zero increments.
    for (int i = 0; i < Offset; ++i) {
        for (int j = 0; j < M; ++j) {
            dW1[at(i, j)] = RNORM(gen);
            dW1[at(i + Offset, j)] = -dW1[at(i, j)];  // antithetic path
            dW2[at(i, j)] = dW1[at(i, j)] * Rho + RhoCoef * RNORM(gen);
            dW2[at(i + Offset, j)] = -dW2[at(i, j)];  // antithetic path
        }
    }

    //Set initial price and volatility
    for (int i = 0; i < ChunkSize; ++i) {
        StockPaths[at(i, 0)] = S;
        VolatilityPaths[at(i, 0)] = IV;
    }

    // Iterate through time. Euler discretization with truncation of the
    // variance at zero. NOTE(review): despite the original "Milstein"
    // comment there is no Milstein correction term (Gamma^2/4*(dW^2-dt));
    // confirm which scheme is intended before changing.
    #pragma omp parallel for
    for (int i = 0; i < ChunkSize; ++i) {
        for (int j = 1; j < M; ++j) {
            const double vPrev = VolatilityPaths[at(i, j - 1)];
            const double sPrev = StockPaths[at(i, j - 1)];
            //Path iteration (stock uses the previous, already-truncated variance)
            double v = vPrev + Kappa * (Theta - vPrev) * dt
                     + Gamma * std::sqrt(vPrev) * SqrtDt * dW2[at(i, j)];
            StockPaths[at(i, j)] = sPrev + r * sPrev * dt
                                 + std::sqrt(vPrev) * sPrev * SqrtDt * dW1[at(i, j)];
            //Volatility truncation
            if (v < 0) v = 0;
            VolatilityPaths[at(i, j)] = v;
        }
    }

    //Determine the option payoffs (undiscounted sum over the chunk)
    if (OptionType) {
        //Put
        for (int i = 0; i < ChunkSize; ++i) {
            const double intrinsic = K - StockPaths[at(i, M - 1)];
            if (intrinsic > 0) {
                Payoff += intrinsic;
            }
        }
    } else {
        //Call
        for (int i = 0; i < ChunkSize; ++i) {
            const double intrinsic = StockPaths[at(i, M - 1)] - K;
            if (intrinsic > 0) {
                Payoff += intrinsic;
            }
        }
    }

    //std::cout << exp(-r*T)*Payoff/ChunkSize << std::endl;
    return Payoff;
}
GB_unop__acosh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__acosh_fc64_fc64
// op(A') function:  GB_unop_tran__acosh_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = cacosh (aij)

// The macros below parameterize the generic transpose template
// (GB_unop_transpose.c, included at the bottom of this file).

// type of the entries of the input matrix A
#define GB_ATYPE \
    GxB_FC64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// reference to the pth entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: complex arc-hyperbolic-cosine (C99 <complex.h>)
#define GB_OP(z, x) \
    z = cacosh (x) ;

// casting (identity: input and output types are both GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = cacosh (z) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOSH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = cacosh (aij) to all anz entries of Ax, writing the results
// to Cx.  The loop is split statically across nthreads OpenMP threads.
// Returns GrB_NO_VALUE when the kernel is compiled out (GB_DISABLE), in
// which case the caller falls back to the generic worker.

GrB_Info GB_unop_apply__acosh_fc64_fc64
(
    GxB_FC64_t *Cx,       // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = aij ;
        Cx [p] = cacosh (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the template GB_unop_transpose.c,
// specialized via the GB_* macros defined above.

GrB_Info GB_unop_tran__acosh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
parallel_resize_vector.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_CONTAINER_PARALLEL_RESIZE_VECTOR_H_ #define CORE_CONTAINER_PARALLEL_RESIZE_VECTOR_H_ #include <cstdlib> #include <vector> #include "core/util/root.h" namespace bdm { /// \brief std::vector with parallel resize template <typename T> class ParallelResizeVector { public: using iterator = T*; using const_iterator = const T*; using value_type = T; explicit ParallelResizeVector(TRootIOCtor* io_ctor) {} // Constructor for ROOT I/O ParallelResizeVector() {} ParallelResizeVector(std::initializer_list<T> init) { reserve(init.size()); for (auto& el : init) { push_back(el); } } ParallelResizeVector(const ParallelResizeVector& other) { if (other.data_ != nullptr && other.capacity_ != 0) { reserve(other.capacity_); // initialize using copy ctor #pragma omp parallel for for (std::size_t i = 0; i < other.size_; i++) { new (&(data_[i])) T(other.data_[i]); } } size_ = other.size_; capacity_ = other.capacity_; } virtual ~ParallelResizeVector() { if (data_ != nullptr) { #pragma omp parallel for for (std::size_t i = 0; i < size_; i++) { data_[i].~T(); } capacity_ = 0; free(data_); data_ = nullptr; } } std::size_t size() const { return size_; } // NOLINT T* data() noexcept { return data_; } // NOLINT const T* data() const noexcept { return data_; } // NOLINT void swap(ParallelResizeVector& other) { // NOLINT // size_ size_ ^= other.size_; other.size_ ^= size_; size_ ^= 
other.size_; // capacity_ capacity_ ^= other.capacity_; other.capacity_ ^= capacity_; capacity_ ^= other.capacity_; // data_ auto* tmp = data_; data_ = other.data_; other.data_ = tmp; } std::size_t capacity() const { return capacity_; } // NOLINT void push_back(const T& element) { // NOLINT if (capacity_ == size_) { reserve(capacity_ * kGrowFactor); } new (&(data_[size_++])) T(element); } void reserve(std::size_t new_capacity) { // NOLINT if (new_capacity > capacity_) { T* new_data = static_cast<T*>(malloc(new_capacity * sizeof(T))); if (data_ != nullptr) { // initialize using copy ctor #pragma omp parallel for for (std::size_t i = 0; i < size_; i++) { new (&(new_data[i])) T(data_[i]); } // destruct old elements #pragma omp parallel for for (std::size_t i = 0; i < size_; i++) { data_[i].~T(); } free(data_); } data_ = new_data; capacity_ = new_capacity; } } void resize(std::size_t new_size, const T& t = T()) { // NOLINT if (capacity_ < new_size) { reserve(new_size); } // grow #pragma omp parallel for for (std::size_t i = size_; i < new_size; i++) { new (&(data_[i])) T(t); } // shrink #pragma omp parallel for for (std::size_t i = new_size; i < size_; i++) { data_[i].~T(); } size_ = new_size; } void clear() { // NOLINT for (std::size_t i = 0; i < size_; i++) { data_[i].~T(); } size_ = 0; } ParallelResizeVector& operator=(const ParallelResizeVector& other) { free(data_); data_ = nullptr; reserve(other.capacity_); size_ = other.size_; capacity_ = other.capacity_; #pragma omp parallel for for (std::size_t i = 0; i < size_; i++) { data_[i] = other.data_[i]; } return *this; } T& operator[](std::size_t index) { return data_[index]; } const T& operator[](std::size_t index) const { return data_[index]; } iterator begin() { return &(data_[0]); } // NOLINT iterator end() { return &(data_[size_]); } // NOLINT const_iterator cbegin() { return &(data_[0]); } // NOLINT const_iterator cend() { return &(data_[size_]); } // NOLINT private: static constexpr float kGrowFactor = 1.5; 
std::size_t size_ = 0; UInt_t capacity_ = 0; T* data_ = nullptr; //[capacity_] // NOLINT BDM_CLASS_DEF(ParallelResizeVector, 1); // NOLINT }; } // namespace bdm #endif // CORE_CONTAINER_PARALLEL_RESIZE_VECTOR_H_
thread_variable.h
#ifndef COMPLIANT_THREAD_VARIABLE_H
#define COMPLIANT_THREAD_VARIABLE_H

// FIX: std::map is used below but <map> was never included.
#include <map>

// FIX: omp_get_thread_num() is called whenever _OPENMP is defined, but
// <omp.h> was previously only included on WIN32.
#ifdef _OPENMP
#include <omp.h>
#endif

/// One lazily-created instance of A per OpenMP thread.
/// operator-> hands the calling thread its private instance, creating it on
/// first use; clear() (and the destructor) deletes every instance.
template<class A>
class thread_variable
{
    typedef std::map<int, A*> value_type;
    value_type value;   // thread id -> owned per-thread instance

    /// Id of the calling thread (0 when compiled without OpenMP).
    static int id()
    {
#ifdef _OPENMP
        int res = omp_get_thread_num();
#else
        int res = 0;
#endif
        return res;
    }

    /// Find-or-create the calling thread's instance.  Callers must serialize
    /// access to the map themselves (operator-> does so).
    A* get()
    {
        int i = id();
        typename value_type::iterator it;
        it = value.find( i );
        if( it == value.end() )
        {
            A* a = new A;
            value[ i ] = a;
            return a;
        }
        else
        {
            return it->second;
        }
    }

    // FIX: the map owns raw pointers, so the compiler-generated copy would
    // lead to a double delete in clear()/~thread_variable().  Declared but
    // not defined (C++03-style noncopyable).
    thread_variable( const thread_variable& );
    thread_variable& operator=( const thread_variable& );

public:
    thread_variable() {}

    /// Access the calling thread's instance (created on first use).
    A* operator->()
    {
        A* res = 0;
#ifdef _OPENMP
#pragma omp critical
#endif
        res = get();
        return res;
    }

    /// Delete every per-thread instance.
    void clear()
    {
        for(typename value_type::iterator it = value.begin(), end = value.end(); it != end; ++it)
        {
            delete it->second;
        }
        value.clear();
    }

    ~thread_variable()
    {
        clear();
    }
};

#endif
// omp_mm.c
/******************************************************************************
* FILE: omp_mm.c
* DESCRIPTION:
*   OpenMp Example - Matrix Multiply - C Version
*   Demonstrates a matrix multiply using OpenMP. Threads share row iterations
*   according to a predefined chunk size.
* AUTHOR: Blaise Barney
* LAST REVISED: 06/28/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define NRA 62 /* number of rows in matrix A */
#define NCA 15 /* number of columns in matrix A */
#define NCB 7  /* number of columns in matrix B */

int main (int argc, char *argv[])
{
int tid, nthreads, i, j, k, chunk;
double a[NRA][NCA], /* matrix A to be multiplied */
       b[NCA][NCB], /* matrix B to be multiplied */
       c[NRA][NCB]; /* result matrix C */

chunk = 10; /* set loop iteration chunk size */

/*** Spawn a parallel region explicitly scoping all variables ***/
#pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k)
  {
  tid = omp_get_thread_num();
  if (tid == 0)
    {
    nthreads = omp_get_num_threads();
    printf("Starting matrix multiple example with %d threads\n",nthreads);
    printf("Initializing matrices...\n");
    }

  /*** Initialize matrices ***/
  /* FIX: the "======> Diretiva OpenMP" placeholders were not valid C; they
     marked the spots where the worksharing directives belong.  Restore the
     `omp for` directives so the loops are split across the team. */
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCA; j++)
      a[i][j]= i+j;
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NCA; i++)
    for (j=0; j<NCB; j++)
      b[i][j]= i*j;
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCB; j++)
      c[i][j]= 0;

  /*** Do matrix multiply sharing iterations on outer loop ***/
  /*** Display who does which iterations for demonstration purposes ***/
  printf("Thread %d starting matrix multiply...\n",tid);
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    {
    printf("Thread=%d did row=%d\n",tid,i);
    for(j=0; j<NCB; j++)
      for (k=0; k<NCA; k++)
        c[i][j] += a[i][k] * b[k][j];
    }
  } /*** End of parallel region ***/

/*** Print results ***/
printf("******************************************************\n");
printf("Result Matrix:\n");
for (i=0; i<NRA; i++)
  {
  for (j=0; j<NCB; j++)
    printf("%6.2f ", c[i][j]);
  printf("\n");
  }
printf("******************************************************\n");
printf ("Done.\n");

return 0; /* explicit success status (was falling off the end of main) */
}
// GB_unaryop__abs_bool_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_int32 // op(A') function: GB_tran__abs_bool_int32 // C type: bool // A type: int32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_int32 ( bool *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
// main.c
/*************** 2D LBM-DEM Code **************************/ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <unistd.h> #include <time.h> #include <assert.h> #include "visit_writer.h" #ifdef _OPENMP #include <omp.h> #endif // Switch on or off FLUID #define _FLUIDE_ // Maximum number of soil grains #ifndef nbgrainsMax #define nbgrainsMax 40000 #endif // Dimension of the LBM Fluid domain #ifndef scale #define scale 1. #endif #ifndef lx #define lx 7826 #endif #ifndef ly #define ly 2325 #endif #ifdef SINGLE_PRECISION typedef float real; #define FLOAT_FORMAT "%e" #else typedef double real; #define FLOAT_FORMAT "%le" #endif #define pi 3.14159265358979 #define rhoS 2650 // Density of solids #define rhoW 1000 // Density of water #define duration 1.5 // Duration of simulation //********************* Data LBM ************************ #define Q 9 int nbgrains; // width of LBM grid size, time step, lattice speed real dx, dtLB, c, c_squ; real _w[Q] = {4. / 9, 1. / 36, 1. / 9, 1. / 36, 1. / 9, 1. / 36, 1. / 9, 1. / 36, 1. 
/ 9}; real * restrict w = _w; real (* restrict f)[ly][Q]; // ************************************ // * * // * e2 e9 e8 * // * \ | / * // * \ | / * // * e3--- e1 ---e7 * // * / | \ * // * / | \ * // * e4 e5 e6 * // * * // ************************************ int ex[Q] = {0, -1, -1, -1, 0, 1, 1, 1, 0}; int ey[Q] = {0, 1, 0, -1, -1, -1, 0, 1, 1}; // average fluid density real rho_moy = 1000; // air density =1 or water =1000 or 999.7 at 20 real rho_outlet, q_outlet; // relaxation parameter real tau = 0.504; real s2 = 1.5, s3 = 1.4, s5 = 1.5, s7 = 1.5, s8 = 1.9841, s9 = 1.9841; // s8=1.6666667,s9=1.6666667; // obstacle array int (* restrict obst)[ly]; // obstacle activity array int (* restrict act)[ly]; real (* restrict delta)[ly][Q]; // LB diameter for the smallest disk (in nodes number) real rMin_LB = 10.; // Fluid kinematic viscosity real nu = 1e-6; // 15.5e-6 for air and 1e-6 for water at 293K or 20C real (* restrict press)[ly]; real reductionR = 0.85; // LBM reduced grain diameter //*********** Data DEM ******************** real G = 9.81; real angleG = 0.0; real xG, yG; real dt; //=5.e-8; real dt2; // Spring stiffness real km = 3e+6, kg = 1.6e+6; // 2e8 1.6e8 real kt = 1.0e+6, ktm = 2e+6; /// changed to higher for lesser /// interpenetration in sample generation 1.3e8 real nug = 6.4e+1; // 1.1e1 real num = 8.7e+1, numb = 8.7e+1; // 1.5e1 real nuf = 1.5e-1, nugt = 5e-1; // frictionless packing nugt real mu = .5317; real mug = 0.0; // Mu for assembling real mum = .466, mumb = .466; // 0.466 //0.53 0.51 0.43 real murf = 0.01; // 0.01 real r = 1e-3; // 5e-4;v real distVerlet = 5e-4; // changed to 1e-6 from 1e-3 for error long UpdateVerlet = 100.; real dtt = 0.; // Time after which wall to prepare sample is removed real iterDEM = 100.; // number of DEM iterations per LBM // Tracked stats // EPE-Effective potential energy // Total Wall Friction - WF, // SE- Strain Energy ESE & IFR- Total Internal Friction real xfront, height, energie_cin, energie_x, energie_y, 
energie_teta, energy_p, energy_EPE, zmean, SE, ESE, WF, IFR; real TSE = 0.0, TBW = 0.0, INCE = 0.0, TSLIP = 0.0, TRW = 0.0; // Total Body Work and Strain Energy TRW_ Total Rotational Work real pf = 0., pft = 0., pff = 0.; // previous force real ic = 0; // ******** Control parameters ************* // Number of DEM steps in LB int npDEM; real *rLB; int stepView = 400; int stepPrint = 800; int stepConsole = 400; int stepStrob = 4000; //visualisation steps int stepFilm = 8000; FILE* s_stats; int nFile = 0; // Nth File saves LB int cptFlash; int *cumul; int * restrict neighbours; // NeighbourWall Bottom, Right, Left & Top int * restrict neighbourWallB; int * restrict neighbourWallR; int * restrict neighbourWallL; int * restrict neighbourWallT; int nNeighWallb, nNeighWallt, nNeighWallL, nNeighWallR; int start = 0; long nbsteps = 0; int vib = 0; real freq = 5; real amp = 4.e-4; real t = 0; // Luding Friction Model struct contact { int i, j; real nx, ny; // normal vector from j to i real fn, ft; // force component in local frame }; // struct contact (*c1)[nbgrainsMax]; // struct contact *c2; struct force { real f1, f2, f3; }; struct force * restrict fhf; real * restrict fhf1, * restrict fhf2, * restrict fhf3; struct grain { real x1, x2, x3; real v1, v2, v3; real a1, a2, a3; real r, m, mw, It; real p; // Pressure on grain real s; // shear real f1, f2; // force real ifm, fm; // friction mobility real fr, ifr; // frictional energy wall and Internal real M11, M12, M21, M22; // Moments M11, M12, M21, M22 real ice, slip, rw; // Inelastic Collisional Energy, slip, & Rotational work int z; // number of contacts int zz; // number of contacts sans the Walls }; struct grain * restrict g; // Wall static real Mby = 0.; static real Mgx = 0.; static real Mhy = 0.; static real Mdx = 0.; real ecart_ini = 1.0; // *************************************************************************** // * utilities // *************************************************************************** real 
Maxt(real x, real y) { if (x < y) return 0.; else return y; } //--------------------------------------------------- real minimum_grain_radius(int nbgrains, struct grain g[nbgrains]) { real rMin = g[0].r; for (int i = 1; i <= nbgrains - 1; i++) { rMin = fmin(rMin, g[i].r); } return rMin; } //---------------------------------------------------- void swap(real* a, real* b) { real tmp; tmp = *a; *a = *b; *b = tmp; } // ******************************************************************* // * Output files * // ******************************************************************* void write_vtk(int nx, int ny, real f[nx][ny][Q], int nbgrains, struct grain g[nbgrains]) { char filename[255]; sprintf(filename, "lbm-dem_%.6i", nFile); char gpress[255]; sprintf(gpress, "grain_pressure_%.6i", nFile); char gvel[255]; sprintf(gvel, "grain_velocity_%.6i", nFile); char gacc[255]; sprintf(gacc, "grain_acceleration_%.6i", nFile); char fpress[255]; sprintf(fpress, "fluid_pressure_%.6i", nFile); char fvel[255]; sprintf(fvel, "fluid_velocity_%.6i", nFile); int dims[] = {nx, ny, 1}; float *xs = malloc(sizeof(float)*nx); float *ys = malloc(sizeof(float)*ny); float *zs = malloc(sizeof(float)*1); float pasxyz = 1. 
/ nx; for (int i = 0; i < nx; i++) xs[i] = i*pasxyz; for (int i = 0; i < ny; i++) ys[i] = i*pasxyz; *zs = 0; int nvars = 5; int vardims[5][1] = {{1}, {3}, {3}, {1}, {3}}; int centering[5][1] = {{1}, {1}, {1}, {1}, {1}}; char* varnames[5][1] = {{"grain_pressure"}, {"grain_velocity"}, {"grain_acceleration"}, {"fluid_pressure"}, {"fluid_velocity"}}; char* filenames[] = {gpress, gvel, gacc, fpress, fvel}; float (*grain_pressure )[nx] = malloc(sizeof(float)*nx*ny); float (*grain_velocity )[nx][3] = malloc(sizeof(float)*nx*ny*3); float (*grain_acceleration)[nx][3] = malloc(sizeof(float)*nx*ny*3); float (*fluid_pressure )[nx] = malloc(sizeof(float)*nx*ny); float (*fluid_velocity )[nx][3] = malloc(sizeof(float)*nx*ny*3); float* vars[5][1] = {{(float*)grain_pressure}, {(float*)grain_velocity}, {(float*)grain_acceleration}, {(float*)fluid_pressure}, {(float*)fluid_velocity}}; for (int y = 0; y < ny; y++) { for (int x = 0; x < nx; x++) { int i = obst[x][y]; if (i >= 0 && i < nbgrains) { grain_pressure[y][x] = g[i].p; grain_velocity[y][x][0] = g[i].v1; grain_velocity[y][x][1] = g[i].v2; grain_velocity[y][x][2] = 0.0; grain_acceleration[y][x][0] = g[i].a1; grain_acceleration[y][x][1] = g[i].a2; grain_acceleration[y][x][2] = 0.0; fluid_pressure[y][x] = 0.0; fluid_velocity[y][x][0] = 0.0; fluid_velocity[y][x][1] = 0.0; fluid_velocity[y][x][2] = 0.0; } else { grain_pressure[y][x] = -1; grain_velocity[y][x][0] = 0.0; grain_velocity[y][x][1] = 0.0; grain_velocity[y][x][2] = 0.0; grain_acceleration[y][x][0] = 0.0; grain_acceleration[y][x][1] = 0.0; grain_acceleration[y][x][2] = 0.0; fluid_pressure[y][x] = 0.0; fluid_velocity[y][x][0] = 0.0; fluid_velocity[y][x][1] = 0.0; fluid_velocity[y][x][2] = 0.0; for (int j = 0; j < Q; ++j) { fluid_pressure[y][x] += f[x][y][j]; fluid_velocity[y][x][0] += f[x][y][j] * ex[j]; fluid_velocity[y][x][1] += f[x][y][j] * ey[j]; } fluid_pressure[y][x] = (1. / 3.) 
* rho_moy * (fluid_pressure[y][x] - 1.); } } } // write_rectilinear_mesh(filename, 1, dims, xs, ys, zs, nvars, vardims, centering, varnames, vars); for (int i = 0; i < 5; ++i) write_rectilinear_mesh(filenames[i], 1, dims, xs, ys, zs, 1, vardims[i], centering[i], varnames[i], vars[i]); free(xs); free(ys); free(zs); free(grain_pressure); free(grain_velocity); free(grain_acceleration); free(fluid_velocity); free(fluid_pressure); } void write_DEM() { int i; char filename[25]; real N0, N1, N2, N3, N4, N5; // Percentage of particles in contact real xgrainmax; FILE* outfile; // Output file // sprintf(filename,"DEM_Grains%.6i.dat",nFile); sprintf(filename, "DEM%.6i.dat", nFile); outfile = fopen(filename, "w"); xfront = g[0].x1 + g[0].r; height = g[0].x2 + g[0].r; energie_cin = 0.; energie_x = 0.; energie_y = 0.; energie_teta = 0.; energy_p = 0.; energy_EPE = 0.; SE = 0.; ESE = 0.; WF = 0.; IFR = 0.; INCE = 0.; TSLIP = 0.; TRW = 0.; zmean = 0; xgrainmax = g[0].x1; N0 = 0; N1 = 0; N2 = 0; N3 = 0; N4 = 0; N5 = 0; for (i = 0; i < nbgrains; i++) { zmean += g[i].z; if (g[i].z == 0) N0 += 1; if (g[i].z == 1) N1 += 1; if (g[i].z == 2) N2 += 1; if (g[i].z == 3) N3 += 1; if (g[i].z == 4) N4 += 1; if (g[i].z == 5) N5 += 1; energie_x += 0.5 * g[i].m * g[i].v1 * g[i].v1; energie_y += 0.5 * g[i].m * g[i].v2 * g[i].v2; energie_teta += 0.5 * g[i].It * g[i].v3 * g[i].v3; energy_p += g[i].m * G * g[i].x2; /* if (nbsteps*dt>=dtt) */ SE += 0.5 * (((g[i].p * g[i].p) / kg) + ((g[i].s * g[i].s) / kt)); WF += g[i].fr; g[i].ifr = fabs(((g[i].m * G + g[i].f2) * (dt * g[i].v2 + dt2 * g[i].a2 / 2.)) + (g[i].f1 * (dt * g[i].v1 + dt2 * g[i].a1 / 2.))); // g[i].ifr=(g[i].f2*(dt*g[i].v2))+(g[i].f1*(dt*g[i].v1)); IFR += g[i].ifr; TSLIP += g[i].slip; TRW += g[i].rw; INCE += g[i].ice; TBW += g[i].ifr; ESE = 0.5 * (((g[i].p * g[i].p) / kg) + ((g[i].s * g[i].s) / kt)); TSE += ESE; // if(g[i].x2>(-1.632*g[i].x1+0.0408)){energy_EPE+=g[i].m*G*g[i].x2;} if (g[i].x1 + g[i].r > xgrainmax) { xgrainmax = g[i].x1 + 
g[i].r; } if (g[i].x2 + g[i].r > height) { height = g[i].x2 + g[i].r; } if (g[i].zz > 0 && g[i].x1 + g[i].r >= xfront) { xfront = g[i].x1 + g[i].r; } if (g[i].z == 0) { g[i].fm = 0; } else g[i].fm = g[i].ifm / g[i].z; fprintf(outfile, "%i\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%" "le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%le\t%" "le\t%i\n", i, g[i].r, g[i].x1, g[i].x2, g[i].x3, g[i].v1, g[i].v2, g[i].v3, g[i].a1, g[i].a2, g[i].a3, fhf1[i], fhf2[i], fhf3[i], g[i].p, g[i].s, ESE, g[i].fr, g[i].ifr, g[i].ice, g[i].slip, g[i].rw, g[i].fm, g[i].M11, g[i].M12, g[i].M21, g[i].M22, g[i].z); } energie_cin = energie_x + energie_y + energie_teta; zmean = zmean / nbgrains; s_stats = fopen("stats.data", "a"); fprintf(s_stats, "%le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le " "%le %le %le %le %le\n", nbsteps * dt - dtt, xfront, xgrainmax, height, zmean, energie_x, energie_y, energie_teta, energie_cin, N0 / nbgrains, N1 / nbgrains, N2 / nbgrains, N3 / nbgrains, N4 / nbgrains, N5 / nbgrains, energy_p, SE, WF, IFR, INCE, TSLIP, TRW); fclose(s_stats); fclose(outfile); } void write_forces() { int i, j; real dn; char nomfile[25]; FILE* outfile1; // Ouverture du fichier // sprintf(filename,"DEM_Grains%.6i.dat",nFile); sprintf(nomfile, "DEM%.6i.ps", nFile); outfile1 = fopen(nomfile, "w"); real margin = 10 * g[0].r, hrx1 = lx, hry2 = ly; fprintf(outfile1, "%%!PS-Adobe-3.0 EPSF-3.0 \n"); fprintf(outfile1, "%%%BoundingBox: %f %f %f %f \n", -margin, -margin, hrx1 + margin, hry2 + margin); fprintf(outfile1, "%%%Creator: Krishna Kumar \n"); fprintf(outfile1, "%%%Title: DEM Grains & Forces \n"); fprintf(outfile1, "0.1 setlinewidth 0.0 setgray \n"); for (i = 0; i <= nbgrains; i++) fprintf(outfile1, "newpath %le %le %le 0.0 setlinewidth %.2f setgray 0 360 arc gsave " "fill grestore\n", g[i].x1 * 10000, g[i].x2 * 10000, g[i].r * 10000, (0.8 - g[i].fm / 2)); for (i = 0; i <= nbgrains; i++) { for (j = 0; j <= nbgrains; j++) { dn = 
(sqrt((g[i].x1 - g[j].x1) * (g[i].x1 - g[j].x1) + (g[i].x2 - g[j].x2) * (g[i].x2 - g[j].x2))) - g[i].r - g[j].r; if (dn < -1e-10 && i != j) { // printf("dn for i %i and j %i are: %le \n",i,j,dn); fprintf(outfile1, "%le setlinewidth \n 0.0 setgray \n", 1.); // c1[i][j].fn); fprintf(outfile1, "1 setlinecap \n newpath \n"); fprintf(outfile1, "%le %le moveto \n %le %le lineto\n", g[i].x1 * 10000, g[i].x2 * 10000, g[j].x1 * 10000, g[j].x2 * 10000); fprintf(outfile1, "stroke \n"); } } } fclose(outfile1); } // -------------------------- void write_densities() { int x, y, i; real pasxyz; real P, u_x, u_y; char filename[25]; char filename_press[25]; FILE* outfile; FILE* s_press; sprintf(filename, "densities%.6i.vtk", nFile); sprintf(filename_press, "pressure_base%.6i.dat", nFile); pasxyz = 1. / lx; outfile = fopen(filename, "w"); s_press = fopen(filename_press, "w"); fprintf(outfile, "# vtk DataFile Version 2.0\n"); fprintf(outfile, "Outfile domain LB t: %e\n", t); fprintf(outfile, "ASCII\n"); fprintf(outfile, "DATASET RECTILINEAR_GRID\n"); fprintf(outfile, "DIMENSIONS %d %d 1\n", lx, ly); fprintf(outfile, "X_COORDINATES %d float\n", lx); for (i = 0; i <= lx - 1; i++) { fprintf(outfile, "%e ", (float)i * pasxyz); } fprintf(outfile, "\n"); fprintf(outfile, "Y_COORDINATES %d float\n", ly); for (i = 0; i <= ly - 1; i++) { fprintf(outfile, "%e ", (float)i * pasxyz); } fprintf(outfile, "\n"); fprintf(outfile, "Z_COORDINATES 1 float\n"); fprintf(outfile, "0\n"); // Pour LB fprintf(outfile, "POINT_DATA %d\n", lx * ly); fprintf(outfile, "SCALARS Pressure float 1\n"); fprintf(outfile, "LOOKUP_TABLE default\n"); for (y = 0; y < ly; y++) { for (x = 0; x < lx; x++) { P = 0.; for (i = 0; i < Q; i++) { P += f[x][y][i]; } P = (1. / 3.) 
* rho_moy * (P - 1.); if (obst[x][y] < 0) { fprintf(outfile, "%.4lf\n", P); if (y == 2) { fprintf(s_press, "%le %le\n", x * pasxyz, P); } } else { fprintf(outfile, "%.4lf\n", 0.); if (y == 2) { fprintf(s_press, "%le %le\n", x * pasxyz, 0.0); } } } } fprintf(outfile, "VECTORS VecVelocity float\n"); for (y = 0; y < ly; y++) { for (x = 0; x < lx; x++) { // P=rho_moy; u_x = 0.; u_y = 0.; for (i = 0; i < Q; i++) { u_x += f[x][y][i] * ex[i]; u_y += f[x][y][i] * ey[i]; } // P = (P-rho_moy)*1./3.; // P = (1./3.)*rho_moy*(P-1.); if (obst[x][y] < 0) { fprintf(outfile, "%.4lf %.4lf 0.\n", u_x, u_y); } else { fprintf(outfile, "%.4lf %.4lf 0.\n", 0., 0.); } } } fclose(s_press); fclose(outfile); } // ******************************************************************* // * sample initial * // ******************************************************************* void temp_sample() { long i, j, k; j = 0; k = 0; for (i = 0; i < nbgrains; ++i) { g[i].r = r; //*(real)(i+1)/nbgrains; g[i].m = rhoS * pi * g[i].r * g[i].r; #ifdef _FLUIDE_ g[i].mw = rhoW * pi * g[i].r * g[i].r; #else g[i].mw = 0; #endif // g[i].m=(4./3.)*rhoS*pi*g[i].r*g[i].r*g[i].r; g[i].It = g[i].m * g[i].r * g[i].r / 2; // g[i].It=(2./5.)*g[i].m*g[i].r*g[i].r; g[i].x1 = r * 1.5 + 2. * r * j; // Pente r*(1.5+(k/10.)) g[i].x2 = r + 2. 
* r * k; g[i].x3 = 0.; g[i].v1 = 0.; g[i].v2 = 0.; g[i].v3 = 0.; g[i].a1 = 0.; g[i].a2 = 0.; g[i].a3 = 0.; // if(j<=4) {j++;} else {j=0;k++;}; if (j < 0 && k == 0) { j++; } else { if (j <= 13) { j++; } else { j = 0; k++; }; }; } } struct grain* read_sample(char * filename_sample) { FILE *sample_file = fopen(filename_sample, "r"); char com[256]; fgets(com, 256, sample_file); printf("%s\n", com); fscanf(sample_file, "%d\n", &nbgrains); struct grain *g = malloc(sizeof(struct grain)*nbgrains); printf("Nb grains %d\n", nbgrains); for (int i = 0; i < nbgrains; ++i) { fscanf(sample_file, FLOAT_FORMAT" "FLOAT_FORMAT" "FLOAT_FORMAT";\n", &g[i].r, &g[i].x1, &g[i].x2); // printf("%le %le %le\n",g[i].r,g[i].x1,g[i].x2); g[i].r = g[i].r * r; g[i].m = rhoS * pi * g[i].r * g[i].r; g[i].It = g[i].m * g[i].r * g[i].r / 2; g[i].x1 = g[i].x1 * r; g[i].x2 = g[i].x2 * r; g[i].x3 = 0.; g[i].v1 = 0.; g[i].v2 = 0.; g[i].v3 = 0.; g[i].a1 = 0.; g[i].a2 = 0.; g[i].a3 = 0.; } fclose(sample_file); return g; } void check_sample(int nbgrains, struct grain g[nbgrains]) { real xMax = g[0].x1; real xMin = g[0].x1; real yMax = g[0].x2; real yMin = g[0].x2; real MassGrain = 0.; for (int i = 0; i < nbgrains; ++i) { MassGrain += g[i].m; xMax = fmax(xMax, g[i].x1 + g[i].r); xMin = fmin(xMin, g[i].x1 - g[i].r); yMax = fmax(yMax, g[i].x2 + g[i].r); yMin = fmin(yMin, g[i].x2 - g[i].r); } real L0 = xMax - xMin; real H0 = yMax - yMin; printf("L0=%le H0=%le Mass of Grains=%le Phi=%le\n", L0, H0, MassGrain, MassGrain / (rhoS * (L0 * H0))); } // ******************************************************************************************* // * Initialise obstacle array * // ******************************************************************************************* void init_obst() { int x, y, i, xi, yi, xf, yf; // c.d.g. 
sphere // real xc,yc; real dist2, r2, R2, xc, yc, rbl0; for (x = 1; x < lx - 1; x++) { for (y = 1; y < ly - 1; y++) { obst[x][y] = -1; } } for (x = 0; x < lx; x++) { obst[x][0] = obst[x][ly - 1] = nbgrains; act[x][0] = act[x][ly - 1] = 0; } for (y = 1; y < ly - 1; y++) { obst[0][y] = obst[lx - 1][y] = nbgrains; act[0][y] = act[lx - 1][y] = 0; } for (i = 0; i < nbgrains; i++) { xc = (g[i].x1 - Mgx) / dx; yc = (g[i].x2 - Mby) / dx; r2 = rLB[i] * rLB[i]; // Unreduced grain radius rbl0 = g[i].r / dx; R2 = rbl0 * rbl0; xi = (int)(xc - rbl0); xf = (int)(xc + rbl0); if (xi < 1) xi = 1; if (xf >= lx - 1) xf = lx - 2; yi = (int)(yc - rbl0); yf = (int)(yc + rbl0); if (yi < 1) yi = 1; if (yf >= ly - 1) yf = ly - 2; for (x = xi; x <= xf; x++) { for (y = yi; y <= yf; y++) { dist2 = (x - xc) * (x - xc) + (y - yc) * (y - yc); if (dist2 <= R2) { if (dist2 <= r2) obst[x][y] = i; } } } } } // ******************************************************************************************* // * Initialise density distribution function with equilibrium to zero density // * // ******************************************************************************************* void init_density(int nx, int ny, real f[nx][ny][Q]) { for (int x = 0; x < nx; x++) { for (int y = 0; y < ny; y++) { for (int iLB = 0; iLB < Q; iLB++) { f[x][y][iLB] = w[iLB]; } } } } // ******************************************************************* // * Calculate the forces on grains * // ******************************************************************* struct force force_grains(long i, long j) { // distance normale real dn, xOiOj, yOiOj, OiOj; real xn, yn; real vn, vxOiOj, vyOiOj, vt; real ftest; struct force f; double fn, ft; // distance relative xOiOj = g[i].x1 - g[j].x1; yOiOj = g[i].x2 - g[j].x2; OiOj = sqrt(xOiOj * xOiOj + yOiOj * yOiOj); dn = OiOj - g[i].r - g[j].r; // calculate the forces if (dn >= 0) { f.f1 = 0; f.f2 = 0; f.f3 = 0; } else { // relative normal velocity vxOiOj = g[i].v1 - g[j].v1; vyOiOj = g[i].v2 
- g[j].v2; xn = xOiOj / OiOj; yn = yOiOj / OiOj; // Compute velocities at contact vn = vxOiOj * xn + vyOiOj * yn; // Tangential Velocity vt = -vxOiOj * yn + vyOiOj * xn - g[i].v3 * g[i].r - g[j].v3 * g[j].r; // calculate normal force fn = -kg * dn - nug * vn; if (fn < 0) fn = 0.0; ft = - kt * vt * dt; ftest = mu * fn; if (fabs(ft) > ftest) { if (ft < 0.0) ft = ftest; else ft = -ftest; } f.f1 = fn * xn - ft * yn; f.f2 = fn * yn + ft * xn; f.f3 = -Maxt(ft * g[i].r, fn * murf * g[i].r * g[j].r); g[i].p += fn; g[j].p += fn; g[i].f1 += f.f1; g[i].f2 += f.f2; g[i].s += ft; g[j].s += ft; g[i].slip += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); pft = ft; g[i].rw += fabs(f.f3) * (fabs(g[i].v3 * dt) + (fabs(f.f3 - pff)) / kt); pff = f.f3; g[i].z += 1; g[i].zz += 1; g[i].ice += ic; if (fn == 0) g[i].ifm = 0; else g[i].ifm += fabs(ft / (mu * fn)); // Stress computations g[i].M11 += f.f1 * xOiOj; g[i].M12 += f.f1 * yOiOj; g[i].M21 += f.f2 * xOiOj; g[i].M22 += f.f2 * yOiOj; } return f; } // ******************************************************************* // * Calculation of forces between the grains and Walls * // ******************************************************************* struct force force_WallB(long i, real dn) { real vn, vt, ftest; struct force f; real fn, ft; vn = g[i].v2; vt = g[i].v1; fn = -km * dn - num * vn; if (fn < 0) fn = 0.; ft = ktm * vt; //*dt; //Krishna ftest = mumb * fn; if (fabs(ft) > ftest) { if (ft < 0.0) ft = ftest; else ft = -ftest; } f.f1 = ft; f.f2 = fn; f.f3 = -(ft * g[i].r * murf); g[i].p += fn; g[i].s += ft; g[i].f1 += f.f1; g[i].z += 1; // Stress computations g[i].M11 += 0; g[i].M12 += f.f1 * dt; g[i].M21 += 0; g[i].M22 += f.f2 * dt; g[i].rw += fabs(f.f3) * (fabs(g[i].v3 * dt) + (fabs(f.f3 - pff)) / kt); g[i].fr += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); pff = f.f3; pft = ft; return f; } struct force force_WallT(long i, real dn) { real vn, vt, fn, ft, ftmax; struct force f; vn = g[i].v2; fn = km * dn - num * vn; ic += 
num * vn * vn * dt; if (fn > 0.) fn = 0.; // relative tangential velocity vt = g[i].v1 + g[i].v3 * g[i].r - amp * freq * cos(freq * t); ft = fabs(ktm * vt); // if (nbsteps*dt<dtt){mumb=mug;nugt=0;} if (vt >= 0) { ftmax = mumb * fn - nugt * vt; } // ic+=nugt*vt*vt*dt;} else { ftmax = mumb * fn + nugt * vt; } // ic+=nug*vt*vt*dt;} // ftmax=mum*fn-num*vt; if (ft > ftmax) ft = ftmax; if (vt > 0) ft = -ft; f.f1 = ft; f.f2 = fn; // f.f3=ft*g[i].r-fabs(murf*g[i].r*g[i].v3*fn); f.f3 = ft * g[i].r * murf; // f.f3=(ft-fabs(vt*nuf))*g[i].r; // Stress computations g[i].M11 += 0; g[i].M12 += f.f1 * fabs(dt); g[i].M21 += 0; g[i].M22 += f.f2 * fabs(dt); g[i].p += fn; g[i].s += ft; g[i].z += 1; // g[i].rw+=fabs(f.f3)*(fabs(g[i].v3*dt)+(fabs(f.f3-pff))/kt); // g[i].fr+=fabs(ft)*(fabs(vt*dt)+(fabs(ft-pft))/kt); // pff=f.f3;pft=ft; return f; } struct force force_WallL(long i, real dn) { real vn, fn, vt, ft; struct force f; vn = g[i].v1; fn = -km * dn + num * vn; ic += num * vn * vn * dt; if (fn < 0.) fn = 0.; vt = g[i].v2; if (vt > 0) ft = mum * fn; else ft = mum * fn; if (vt > 0) ft = -ft; f.f1 = fn; f.f2 = ft; // f.f2=ft*g[i].r-fabs(murf*g[i].r*g[i].v3*fn); f.f3 = ft * g[i].r * murf; // f.f3=(ft-fabs(vt*nuf))*g[i].r; // Stress computations g[i].M11 += f.f1 * fabs(dt); g[i].M12 += 0; g[i].M21 += f.f2 * fabs(dt); g[i].M22 += 0; g[i].p += fn; g[i].s += ft; g[i].f1 += f.f1; g[i].z += 1; g[i].ice += ic; g[i].rw += fabs(f.f3) * fabs(g[i].v3 * dt); g[i].fr += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt); pft = ft; return f; } struct force force_WallR(long i, real dn) { real vn, fn, vt, ft; struct force f; vn = g[i].v1; fn = km * dn - num * vn; // ic+=num*vn*vn*dt; vt = g[i].v2; // tangential velcoty ft = mum * fn; // ic+=nugt*vt*vt*dt; // ftmax=mum*fn-num*vt; if (vt > 0) ft = -ft; if (fn > 0.) 
fn = 0.; // right wall: only compressive contact retained
f.f1 = fn;
f.f2 = -ft;
f.f3 = ft * g[i].r * murf;
g[i].p += fn;
g[i].f1 += f.f1;
// g[i].ice +=ic;
// g[i].fr+=fabs(ft)*(fabs(vt*dt)+(fabs(f.f3-pft))/kt);
pft = ft;
// Stress computations
g[i].M11 += f.f1 * fabs(dt);
g[i].M12 += 0;
g[i].M21 += f.f2 * fabs(dt);
g[i].M22 += 0;
// g[i].s += ft;
g[i].z += 1;
return f;
}

// *******************************************************************
// *                                                                 *
// *                                                                 *
// *                                                                 *
// *            Calculate the hydrodynamic forces                    *
// *                                                                 *
// *                                                                 *
// *                                                                 *
// *                                                                 *
// *******************************************************************

// *******************************************************************************************
// * Reinitialise density distributions for nodes that change state from solid
// to fluid *
// *******************************************************************************************
// For every lattice node currently covered by a grain (obst[x][y] != -1),
// rewrite its distributions f[x][y][*] as an equilibrium built from the
// grain's rigid-body velocity at that node, so the node is consistent when
// the grain moves off it.
void reinit_obst_density()
{
#pragma omp parallel for
  for (int x = 1; x < lx - 1; x++) {
    for (int y = 1; y < ly - 1; y++) {
      int i = obst[x][y];
      if (i != -1) {
        // Usqu: the standard (^2) the speed of the node from the portion
        // solid to the fluid portion (rigid-body velocity of grain i at node)
        real u_squ = ((g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) *
                          (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3)
                      +(g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3) *
                          (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) /
                     (c * c);
        for (int iLB = 0; iLB < Q; iLB++) {
          // eu : e.u in formula feq
          real eu = (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                     ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) /
                    c;
          f[x][y][iLB] = w[iLB] * (1. + 3 * eu + 4.5 * eu * eu - 1.5 * u_squ); //*rho_moy;
        }
      }
    }
  }
}

// *******************************************************************************************
// * Obstacle array construction && nodes activity *
// *******************************************************************************************
// Rebuilds, from the current grain positions:
//   obst[x][y]       = grain index covering node (x,y), or -1 for fluid,
//   act[x][y]        = 1 if the node is fluid or a boundary (active) node,
//   delta[x][y][iLB] = normalized wall distance along link iLB for curved
//                      bounce-back (Klaus/Nils/Ulrich interpolation).
void obst_construction()
{
  int x, y, xp, next_x, next_y, i, iLB, xi, yi, xf, yf;
  // c.d.g. sphere
  // real xc,yc;
  real dist2, aa, bb, cc, r2, xc, yc, R2, rbl0;
  // NOTE(review): the inner loop variables (y, iLB, and in the second loop
  // xc, yc, x, y, xi..yf, dist2, aa, bb, cc, ...) are function-scope and thus
  // SHARED under "omp parallel for"; only the outermost loop index is
  // implicitly private. This looks like a data race -- verify the private
  // clauses were not lost.
#pragma omp parallel for
  for (x = 1; x < lx - 1; x++) {
    for (y = 1; y < ly - 1; y++) {
      obst[x][y] = -1;
      act[x][y] = 1;
      for (iLB = 1; iLB < Q; iLB++) {
        delta[x][y][iLB] = 0;
      }
    }
  }
#pragma omp parallel for
  for (i = 0; i < nbgrains; i++) {
    xc = (g[i].x1 - Mgx) / dx;
    yc = (g[i].x2 - Mby) / dx;
    r2 = rLB[i] * rLB[i];
    rbl0 = g[i].r / dx; // JYD2
    R2 = rbl0 * rbl0;
    // xi=xc-rLB[i]; xf=xc+rLB[i]; if(xi<1) xi=1; if(xf>=lx-1) xf=lx-2;
    // yi=yc-rLB[i]; yf=yc+rLB[i]; if(yi<1) yi=1; if(yf>=ly-1) yf=ly-2;
    // Clamp the grain's bounding box to the interior of the lattice.
    xi = (int)(xc - rbl0);
    xf = (int)(xc + rbl0);
    if (xi < 1)
      xi = 1;
    if (xf >= lx - 1)
      xf = lx - 2;
    yi = (int)(yc - rbl0);
    yf = (int)(yc + rbl0);
    if (yi < 1)
      yi = 1;
    if (yf >= ly - 1)
      yf = ly - 2;
    for (y = yi; y <= yf; y++) {
      for (x = xi; x <= xf; x++) {
        dist2 = (x - xc) * (x - xc) + (y - yc) * (y - yc);
        if (dist2 <= R2) {
          if (dist2 <= r2)
            obst[x][y] = i;
        }
        // if(dist2<=r2) obst[x][y]=i;
      }
    }
    // * Obstacle in inteaction with fluid (active obstacles)
    for (y = yi; y <= yf; y++) {
      for (x = xi; x <= xf; x++) {
        if (obst[x][y] == i) {
          act[x][y] = 0;
          // Search fluid node neighbourss
          for (iLB = 1; iLB < Q; iLB++) {
            next_x = x + ex[iLB];
            // if (next_x<0) next_x=0; if (next_x>=lx)
            // next_x=lx-1;
            next_y = y + ey[iLB];
            // if (next_y<0) next_y=0; if (next_y>=ly)
            // next_y=ly-1;
            if (obst[next_x][next_y] == -1) {
              // Calculating the distance between the node fluid and the wall
              // of the particle (Klaus-Nils-Ulrich)
              act[x][y] = 1;
              xp = x;
              aa = fabs(ex[iLB]) + fabs(ey[iLB]);
              bb = (xp + ex[iLB] - xc) * ex[iLB] + (y + ey[iLB] - yc) * ey[iLB];
              cc = (xp + ex[iLB] - xc) * (xp + ex[iLB] - xc) +
                   (y + ey[iLB] - yc) * (y + ey[iLB] - yc) - r2;
              delta[x][y][iLB] = (bb - sqrt(fabs(bb * bb - aa * cc))) / aa;
            }
          }
        }
      }
    }
  }
}

// ************************************************************************************************
// * Principal LB: Collision - (Streaming + Boundary Conditions) *
// ************************************************************************************************
// One full lattice-Boltzmann step on the D2Q9 grid:
//   1) MRT (multiple-relaxation-time) collision on fluid nodes, computed in
//      moment space (rho, e, eps, j, q, p) per Yu-Mei-Luo-Shyy,
//   2) bounce-back boundary conditions on the four domain edges and corners,
//   3) curved-boundary momentum exchange at grain surfaces,
//   4) streaming via the two swap passes at the end.
void collision_streaming()
{
  const int half = (Q - 1) / 2;
  const real a = 1. / 36;
  // Post-collision part computation
  // (Yu-Mei-Luo-Shyy)
#pragma omp parallel for
  for (int x = 1; x < lx - 1; x++) {
    for (int y = 1; y < ly - 1; y++) {
      if (obst[x][y] == -1) {
        // Raw moments of the D2Q9 populations at this node.
        real rho = f[x][y][0] + f[x][y][1] + f[x][y][2] + f[x][y][3] +
                   f[x][y][4] + f[x][y][5] + f[x][y][6] + f[x][y][7] +
                   f[x][y][8];
        real e = -4 * f[x][y][0] + 2 * f[x][y][1] - f[x][y][2] +
                 2 * f[x][y][3] - f[x][y][4] + 2 * f[x][y][5] - f[x][y][6] +
                 2 * f[x][y][7] - f[x][y][8];
        real eps = 4 * f[x][y][0] + f[x][y][1] - 2 * f[x][y][2] + f[x][y][3] -
                   2 * f[x][y][4] + f[x][y][5] - 2 * f[x][y][6] + f[x][y][7] -
                   2 * f[x][y][8];
        real j_x = f[x][y][5] + f[x][y][6] + f[x][y][7] - f[x][y][1] -
                   f[x][y][2] - f[x][y][3];
        real q_x = -f[x][y][1] + 2 * f[x][y][2] - f[x][y][3] + f[x][y][5] -
                   2 * f[x][y][6] + f[x][y][7];
        real j_y = f[x][y][1] + f[x][y][8] + f[x][y][7] - f[x][y][3] -
                   f[x][y][4] - f[x][y][5];
        real q_y = f[x][y][1] - f[x][y][3] + 2 * f[x][y][4] - f[x][y][5] +
                   f[x][y][7] - 2 * f[x][y][8];
        real p_xx = f[x][y][2] - f[x][y][4] + f[x][y][6] - f[x][y][8];
        real p_xy = -f[x][y][1] + f[x][y][3] - f[x][y][5] + f[x][y][7];
        real j_x2 = j_x * j_x;
        real j_y2 = j_y * j_y;
        // Relax each non-conserved moment toward equilibrium (rates s2..s9).
        real eO = e - s2 * (e + 2 * rho - 3 * (j_x2 + j_y2) / rho);
        real epsO = eps - s3 * (eps - rho + 3 * (j_x2 + j_y2) / rho);
        real q_xO = q_x - s5 * (q_x + j_x);
        real q_yO = q_y - s7 * (q_y + j_y);
        real p_xxO = p_xx - s8 * (p_xx - (j_x2 - j_y2) / rho);
        real p_xyO = p_xy - s9 * (p_xy - j_x * j_y / rho);
        // Map post-collision moments back to populations.
        f[x][y][0] = a * (4*rho - 4 * eO + 4 * epsO);
        f[x][y][2] = a * (4*rho - eO - 2*epsO - 6*j_x + 6*q_xO + 9*p_xxO);
        f[x][y][4] = a * (4*rho - eO - 2*epsO - 6*j_y + 6*q_yO - 9*p_xxO);
        f[x][y][6] = a * (4*rho - eO - 2*epsO + 6*j_x - 6*q_xO + 9*p_xxO);
        f[x][y][8] = a * (4*rho - eO - 2*epsO + 6*j_y - 6*q_yO - 9*p_xxO);
        f[x][y][1] = a * (4*rho + 2*eO + epsO - 6*j_x - 3*q_xO + 6*j_y +
3*q_yO - 9*p_xyO);
        f[x][y][3] = a * (4*rho + 2*eO + epsO - 6*j_x - 3*q_xO - 6*j_y -
                          3*q_yO + 9*p_xyO);
        f[x][y][5] = a * (4*rho + 2*eO + epsO + 6*j_x + 3*q_xO - 6*j_y -
                          3*q_yO - 9*p_xyO);
        f[x][y][7] = a * (4*rho + 2*eO + epsO + 6*j_x + 3*q_xO + 6*j_y +
                          3*q_yO + 9*p_xyO);
      }
    }
  }
  // To calculate the edges; see the book Lattice Boltzmann Modeling
  // Bounce back for y=0 & y=ly-1
  for (int x = 1; x < lx - 1; x++) {
    f[x][0][8] = f[x][1][4];
    f[x][0][7] = f[x + 1][1][3]; //;+uw_b/6;
    f[x][0][1] = f[x - 1][1][5]; //-uw_b/6;
    // Top plate
    f[x][ly - 1][4] = f[x][ly - 2][8];
    f[x][ly - 1][3] = f[x - 1][ly - 2][7]; //-uw_h/6;
    f[x][ly - 1][5] = f[x + 1][ly - 2][1]; //+uw_h/6;
  }
  // Bounce back for the left (x=0) and right (x=lx-1) walls
  for (int y = 1; y < ly - 1; y++) {
    f[0][y][6] = f[1][y][2];
    f[0][y][7] = f[1][y + 1][3]; //;+uw_b/6;
    f[0][y][5] = f[1][y - 1][1]; //-uw_b/6;
    f[lx - 1][y][2] = f[lx - 2][y][6];
    f[lx - 1][y][3] = f[lx - 2][y - 1][7]; //-uw_h/6;
    f[lx - 1][y][1] = f[lx - 2][y + 1][5]; //+uw_h/6;
  }
  // corner nodes
  f[0][0][7] = f[1][1][3];
  f[lx - 1][0][1] = f[lx - 2][1][5]; //+uw_b/6
  f[0][ly - 1][5] = f[1][ly - 2][1]; //-uw_b/6
  f[lx - 1][ly - 1][3] = f[lx - 2][ly - 2][7];
  // bounce back in obstacles
  /////////////////////////////////////////////////////////
  // To calculate force f[][][])                          //
  // 1: articlel of JYD-Mouloud                           //
  // 2: article of Klaus-Nils-Ulrich                      //
  // 3: article of Yu-Mei-Luo-Shyy                        //
  /////////////////////////////////////////////////////////
  // Interpolated (curved-boundary) bounce-back on active boundary nodes,
  // with the moving-wall momentum correction from the grain velocity.
  for (int x = 1; x < lx - 1; x++) {
    for (int y = 1; y < ly - 1; y++) {
      int i = obst[x][y];
      if (i != -1 && act[x][y] == 1) {
        // First half of the directions (opposite link is iLB + half).
        for (int iLB = 1; iLB <= half; iLB++) {
          int next_x = x + ex[iLB];
          int next_y = y + ey[iLB];
          if (obst[next_x][next_y] != -1)
            f[x][y][iLB] = w[iLB];
          else //(obst[next_x][next_y]==-1)
          {
            // Calculation is based on JYD-Mouloud (2.3.3)
            if (delta[x][y][iLB] >= 0.5) {
              f[x][y][iLB] =
                  f[next_x][next_y][iLB + half] / (2 * delta[x][y][iLB]) +
                  (2 * delta[x][y][iLB] - 1) * f[next_x][next_y][iLB] /
                      (2 * delta[x][y][iLB]) +
                  3 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby -
g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) /
                      delta[x][y][iLB];
            }
            // Near-wall case: two-point interpolation along the link.
            if (delta[x][y][iLB] > 0. && delta[x][y][iLB] < 0.5) {
              int next_xx = next_x + ex[iLB];
              int next_yy = next_y + ey[iLB];
              f[x][y][iLB] =
                  2 * delta[x][y][iLB] * f[next_x][next_y][iLB + half] +
                  (1 - 2 * delta[x][y][iLB]) * f[next_xx][next_yy][iLB + half] +
                  6 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3));
            }
          }
        }
        // Second half of the directions (opposite link is iLB - half).
        for (int iLB = 1 + half; iLB < Q; iLB++) {
          int next_x = x + ex[iLB];
          int next_y = y + ey[iLB];
          if (obst[next_x][next_y] != -1)
            f[x][y][iLB] = w[iLB];
          else //(obst[next_x][next_y]==-1)
          {
            // Calculation is based on JYD-Mouloud (2.3.3)
            if (delta[x][y][iLB] >= 0.5) {
              f[x][y][iLB] =
                  f[next_x][next_y][iLB - half] / (2 * delta[x][y][iLB]) +
                  (2 * delta[x][y][iLB] - 1) * f[next_x][next_y][iLB] /
                      (2 * delta[x][y][iLB]) +
                  3 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3)) /
                      delta[x][y][iLB];
            }
            if (delta[x][y][iLB] > 0.
&& delta[x][y][iLB] < 0.5) {
              int next_xx = next_x + ex[iLB];
              int next_yy = next_y + ey[iLB];
              f[x][y][iLB] =
                  2 * delta[x][y][iLB] * f[next_x][next_y][iLB - half] +
                  (1 - 2 * delta[x][y][iLB]) * f[next_xx][next_yy][iLB - half] +
                  6 * (w[iLB] / c) *
                      (ex[iLB] * (g[i].v1 - (y * dx + Mby - g[i].x2) * g[i].v3) +
                       ey[iLB] * (g[i].v2 + (x * dx + Mgx - g[i].x1) * g[i].v3));
            }
          }
        }
      }
    }
  }
  // Streaming by swapping: first swap each population with its opposite
  // in place, then swap across neighbouring nodes (two-pass swap streaming).
  for (int x = 0; x < lx; x++) {
    for (int y = 0; y < ly; y++) {
      for (int iLB = 1; iLB <= half; iLB++) {
        swap(&f[x][y][iLB], &f[x][y][iLB + half]);
      }
    }
  }
  for (int x = 0; x < lx; x++) {
    for (int y = 0; y < ly; y++) {
      for (int iLB = 1; iLB <= half; iLB++) {
        int next_x = x + ex[iLB];
        // if(next_x<0) next_x=lx-1;
        int next_y = y + ey[iLB];
        if (next_x >= 0 && next_y >= 0 && next_x < lx && next_y < ly) {
          swap(&f[x][y][iLB + half], &f[next_x][next_y][iLB]);
        }
      }
    }
  }
}

// ************************************************************************************************
// * Compute total density to verify of the system that no divergence occurs
// when iterating *
// ************************************************************************************************
// Prints the sum of all populations (total mass) -- a cheap sanity check
// that the LB iteration conserves density.
void check_density()
{
  int x, y, iLB;
  real sum = 0;
  for (x = 0; x < lx; x++) {
    for (y = 0; y < ly; y++) {
      for (iLB = 0; iLB < Q; iLB++) {
        sum = sum + f[x][y][iLB];
      }
    }
  }
  printf("Iteration Number %ld, Total density in the system %f\n", nbsteps, sum);
}

// Same total-density sum as check_density(), reported once to stderr at exit.
void final_density()
{
  int x, y, iLB;
  real sum = 0;
  for (x = 0; x < lx; x++) {
    for (y = 0; y < ly; y++) {
      for (iLB = 0; iLB < Q; iLB++) {
        sum = sum + f[x][y][iLB];
      }
    }
  }
  fprintf(stderr, "final_density: %f\n", sum);
}

// Integer helpers used by forces_fluid() for clamping index ranges.
int min(int x, int y) { return (x < y) ? x : y; }
int max(int x, int y)
{
  return (x > y) ?
x : y;
}

// ****************************************************************************
// *                     Compute hydrodynamic forces                          *
// ****************************************************************************
// Momentum-exchange method: for every boundary link of every grain, the
// exchanged momentum of the two populations crossing the link is accumulated
// into the per-grain force (fhf1, fhf2) and torque (fhf3) arrays, then scaled
// to physical units.
void forces_fluid(int nx, int ny, real f[nx][ny][Q], int nbgrains,
                  struct grain g[nbgrains])
{
  const int half = (Q - 1) / 2;
  for (int i = 0; i < nbgrains; ++i) {
    fhf1[i] = 0;
    fhf2[i] = 0;
    fhf3[i] = 0;
  }
#pragma omp parallel for
  for (int i = 0; i < nbgrains; ++i) {
    const real xc = (g[i].x1 - Mgx) / dx;
    const real yc = (g[i].x2 - Mby) / dx;
    const real rbl0 = g[i].r / dx;
    // Bounding box of the grain, clamped to the interior (int min/max,
    // so the real-valued bounds are truncated).
    const int xi = max(xc - rbl0, 1);
    const int xf = min(xc + rbl0, nx-2);
    const int yi = max(yc - rbl0, 1);
    const int yf = min(yc + rbl0, ny-2);
    for (int x = xi; x <= xf; ++x) {
      for (int y = yi; y <= yf; ++y) {
        if (i != obst[x][y])
          continue;
        for (int iLB = 1; iLB < Q; ++iLB) {
          const int next_x = x + ex[iLB];
          const int next_y = y + ey[iLB];
          if (obst[next_x][next_y] != i) {
            // halfq maps iLB to its opposite direction.
            const int halfq = (iLB <= half) ? half : -half;
            const real fnx = (f[x][y][iLB + halfq] + f[next_x][next_y][iLB]) *
                             ex[iLB + halfq];
            const real fny = (f[x][y][iLB + halfq] + f[next_x][next_y][iLB]) *
                             ey[iLB + halfq];
            fhf1[i] = fhf1[i] + fnx;
            fhf2[i] = fhf2[i] + fny;
            // Torque about the grain centre.
            fhf3[i] = fhf3[i] - fnx * (y - (g[i].x2 - Mby) / dx) +
                      fny * (x - (g[i].x1 - Mgx) / dx);
          }
        }
      }
    }
  }
  // Convert lattice momentum exchange to physical force/torque units.
#pragma omp parallel for
  for (int i = 0; i < nbgrains; ++i) {
    fhf1[i] *= rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5));
    fhf2[i] *= rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5));
    fhf3[i] *= dx * rho_moy * 9 * nu * nu / (dx * (tau - 0.5) * (tau - 0.5));
  }
}

//**********************************************************
// Sums all forces (hydrodynamic, grain-grain, grain-wall, gravity) into the
// per-grain accelerations a1/a2/a3. On movie-output steps the grain-grain
// force loop is inlined (instead of calling force_grains); the body continues
// on the following source lines.
void acceleration_grains()
{
  long i, j;
  int jdep;
  real dn, ftest;
  real fn, ft;
  struct force fji;
  if (nbsteps % stepFilm == 0 && start == 1) {
    // Outfile MGPost
    // distance normale
    real xOiOj, yOiOj, OiOj;
    real xn, yn;
    real vn, vxOiOj, vyOiOj;
    real vt;
    // Seed accelerations with the hydrodynamic forces.
    for (i = 0; i <= nbgrains - 1; i++) {
      g[i].a1 = fhf1[i];
      g[i].a2 = fhf2[i];
      g[i].a3 = fhf3[i];
}
    // Summation of forces on the grains
    for (i = 0; i <= nbgrains - 1; i++) {
      // cumul[i-1]..cumul[i] delimits grain i's slice of the Verlet list.
      if (i == 0)
        jdep = 0;
      else
        jdep = cumul[i - 1];
      for (j = jdep; j < cumul[i]; j++) {
        //*
        // fji=force_grains(i,neighbours[j]);
        //*
        // forces_grains
        xOiOj = g[i].x1 - g[neighbours[j]].x1;
        yOiOj = g[i].x2 - g[neighbours[j]].x2;
        OiOj = sqrt(xOiOj * xOiOj + yOiOj * yOiOj);
        dn = OiOj - g[i].r - g[neighbours[j]].r;
        if (dn >= 0) {
          // No overlap: no contact force.
          fji.f1 = 0;
          fji.f2 = 0;
          fji.f3 = 0;
        } else {
          // relative normal velocity
          vxOiOj = g[i].v1 - g[neighbours[j]].v1;
          vyOiOj = g[i].v2 - g[neighbours[j]].v2;
          xn = xOiOj / OiOj;
          yn = yOiOj / OiOj;
          vn = vxOiOj * xn + vyOiOj * yn;
          vt = -vxOiOj * yn + vyOiOj * xn - g[i].v3 * g[i].r -
               g[neighbours[j]].v3 * g[neighbours[j]].r;
          fn = -kg * dn - nug * vn;
          if (fn < 0)
            fn = 0.0;
          // NOTE(review): this inlined version differs from force_grains():
          // there ft = -kt*vt*dt and the Coulomb cap is ftest = mu*fn.
          // Here the sign is dropped and the cap is mu*ft (which makes the
          // cap condition nearly always true) -- confirm which is intended.
          ft = kt * vt * dt;
          ftest = mu * ft;
          if (fabs(ft) > ftest) {
            if (ft > 0.0)
              ft = ftest;
            else
              ft = -ftest;
          }
          //calculate the normal force
          fji.f1 = fn * xn - ft * yn;
          fji.f2 = fn * yn + ft * xn;
          fji.f3 = -ft * g[i].r * murf;
          g[i].p += fn;
          g[neighbours[j]].p += fn;
          g[i].s += ft;
          g[neighbours[j]].s += ft;
          g[i].slip += fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt);
          g[neighbours[j]].slip +=
              fabs(ft) * (fabs(vt * dt) + (fabs(ft - pft)) / kt);
          g[i].rw += fabs(fji.f3) * (fabs(g[i].v3 * dt) + (fabs(fji.f3 - pff)) / kt);
          g[neighbours[j]].rw +=
              fabs(fji.f3) * (fabs(g[i].v3 * dt) + (fabs(fji.f3 - pff)) / kt);
          g[i].z += 1;
          pff = fji.f3;
          pft = ft;
          // Stress computations
          g[i].M11 += fji.f1 * xOiOj;
          g[i].M12 += fji.f1 * yOiOj;
          g[i].M21 += fji.f2 * xOiOj;
          g[i].M22 += fji.f2 * yOiOj;
        }
        // end force_grains
        // Newton's third law: equal and opposite force on the neighbour
        // (torque contribution added with the same sign on both grains).
        g[i].a1 = g[i].a1 + fji.f1;
        g[i].a2 = g[i].a2 + fji.f2;
        g[i].a3 = g[i].a3 + fji.f3;
        g[neighbours[j]].a1 = g[neighbours[j]].a1 - fji.f1;
        g[neighbours[j]].a2 = g[neighbours[j]].a2 - fji.f2;
        g[neighbours[j]].a3 = g[neighbours[j]].a3 + fji.f3;
      }
    }
  } else {
    // Calculate normal
    // Non-movie steps: seed with hydrodynamic forces, then delegate the
    // pair interaction to force_grains().
    for (i = 0; i <= nbgrains - 1; i++) {
      g[i].a1 = fhf1[i];
      g[i].a2 = fhf2[i];
      g[i].a3 = fhf3[i];
    }
    // summation of forces between the grains
    for (i = 0; i <=
nbgrains - 1; i++) {
      // printf("cumul(%d)= %d\n",i,cumul[i]);
      if (i == 0)
        jdep = 0;
      else
        jdep = cumul[i - 1];
      for (j = jdep; j < cumul[i]; j++) {
        // printf("grain(%d), neighbours(%d)= %d\n",i,i,neighbours[j]);
        fji = force_grains(i, neighbours[j]);
        g[i].a1 = g[i].a1 + fji.f1;
        g[i].a2 = g[i].a2 + fji.f2;
        g[i].a3 = g[i].a3 + fji.f3;
        g[neighbours[j]].a1 = g[neighbours[j]].a1 - fji.f1;
        g[neighbours[j]].a2 = g[neighbours[j]].a2 - fji.f2;
        g[neighbours[j]].a3 = g[neighbours[j]].a3 + fji.f3;
      }
    }
  }
  // Forces on the botton wall
  for (i = 0; i < nNeighWallb; i++) {
    dn = g[neighbourWallB[i]].x2 - g[neighbourWallB[i]].r - Mby;
    if (dn < 0) {
      fji = force_WallB(neighbourWallB[i], dn);
      g[neighbourWallB[i]].a1 = g[neighbourWallB[i]].a1 + fji.f1;
      g[neighbourWallB[i]].a2 = g[neighbourWallB[i]].a2 + fji.f2;
      g[neighbourWallB[i]].a3 = g[neighbourWallB[i]].a3 + fji.f3;
      // NOTE(review): g[i] below indexes by the neighbour-LIST position, not
      // the grain id g[neighbourWallB[i]] -- this looks like an indexing bug
      // in the friction-work accumulation; confirm.
      g[neighbourWallB[i]].fr +=
          fabs(fji.f1) * (fabs(dt * g[i].v1) + fabs(dt2 * g[i].a1) +
                          (fabs(fji.f1 - pf)) / kt); // Friction work at the wall dt2*g[i].a1/2
      pf = fji.f1; // Previous force fji.f1
    }
  }
  // Forces on the Top Wall
  for (i = 0; i < nNeighWallt; i++) {
    dn = -g[neighbourWallT[i]].x2 - g[neighbourWallT[i]].r + Mhy;
    if (dn < 0) {
      fji = force_WallT(neighbourWallT[i], dn);
      g[neighbourWallT[i]].a1 = g[neighbourWallT[i]].a1 + fji.f1;
      g[neighbourWallT[i]].a2 = g[neighbourWallT[i]].a2 + fji.f2;
      g[neighbourWallT[i]].a3 = g[neighbourWallT[i]].a3 + fji.f3;
    }
  }
  // Forces on the Left Wall
  for (i = 0; i < nNeighWallL; i++) {
    dn = g[neighbourWallL[i]].x1 - g[neighbourWallL[i]].r - Mgx;
    if (dn < 0) {
      fji = force_WallL(neighbourWallL[i], dn);
      g[neighbourWallL[i]].a1 = g[neighbourWallL[i]].a1 + fji.f1;
      g[neighbourWallL[i]].a2 = g[neighbourWallL[i]].a2 + fji.f2;
      g[neighbourWallL[i]].a3 = g[neighbourWallL[i]].a3 + fji.f3;
      // NOTE(review): same g[i] vs g[neighbourWallL[i]] indexing concern as
      // the bottom-wall loop above.
      g[neighbourWallL[i]].fr +=
          fabs(fji.f2) * (fabs(dt * g[i].v1) + fabs(dt2 * g[i].a1) +
                          (fabs(fji.f2 - pf)) / kt); // Friction work at the wall
      pf = fji.f2; // Previous force fji.f1
    }
  }
  // Forces on the right Wall
  for (i =
0; i < nNeighWallR; i++) {
    dn = -g[neighbourWallR[i]].x1 - g[neighbourWallR[i]].r + Mdx;
    if (dn < 0) {
      fji = force_WallR(neighbourWallR[i], dn);
      g[neighbourWallR[i]].a1 = g[neighbourWallR[i]].a1 + fji.f1;
      g[neighbourWallR[i]].a2 = g[neighbourWallR[i]].a2 + fji.f2;
      g[neighbourWallR[i]].a3 = g[neighbourWallR[i]].a3 + fji.f3;
    }
  }
  // calculate acceleration
  // Force -> acceleration, with buoyancy-corrected gravity ((m - mw)/m)*G.
  for (i = 0; i <= nbgrains - 1; i++) {
    g[i].a1 = g[i].a1 / g[i].m + ((g[i].m - g[i].mw) / g[i].m) * xG;
    g[i].a2 = (g[i].a2 / g[i].m) + ((g[i].m - g[i].mw) / g[i].m) * yG;
    g[i].a3 = g[i].a3 / g[i].It;
  }
}

//**********************************************************
// Rebuild the grain-grain Verlet neighbour list:
//   neighbours[] holds, for each grain i, the indices j > i within
//   distVerlet of contact; cumul[i] is the running end offset of grain i's
//   slice (prefix-sum layout read back in acceleration_grains()).
void initVerlet()
{
  int i, j;
  int jneighbours;
  real distx, disty;
  // real distVerlet=.1e-7;
  jneighbours = 0;
  for (i = 0; i < nbgrains; i++) {
    for (j = i + 1; j < nbgrains; j++) {
      distx = g[i].x1 - g[j].x1;
      disty = g[i].x2 - g[j].x2;
      // Cheap axis-aligned rejection before the exact distance test.
      if (((fabs(distx) - g[i].r - g[j].r) <= distVerlet) &&
          ((fabs(disty) - g[i].r - g[j].r) <= distVerlet)) {
        if ((sqrt(distx * distx + disty * disty) - g[i].r - g[j].r) <=
            distVerlet) {
          neighbours[jneighbours] = j;
          jneighbours++;
          // NOTE(review): this only warns (==, not >=) and does not stop;
          // an overfull list would overflow neighbours[] -- verify capacity.
          if (jneighbours == (nbgrains * 6 - 1))
            printf("error! size of vector verlet neighbors is outdated");
        }
      }
      cumul[i] = jneighbours;
    }
    // printf("cumul(%d)= %d\n",i,cumul[i]);
  }
}

// Rebuild the four grain-wall neighbour lists (grains within distVerlet of
// each wall). Also repositions the right/top walls (Mdx/Mhy) depending on
// whether the settling phase (nbsteps*dt < dtt) is over.
void VerletWall()
{
  int i;
  nNeighWallb = 0;
  nNeighWallL = 0;
  nNeighWallt = 0;
  nNeighWallR = 0;
  real dn;
  // real distVerlet=.1e-7;
  // Verlet WallB
  if (nbsteps * dt < dtt) {
    Mdx = 1.e-3 * lx / 10;
    Mhy = (1.e-3 * ly / 10);
  } else {
    Mdx = 1.e-3 * lx;
    Mhy = 1.e-3 * ly;
  }
  for (i = 0; i < nbgrains; ++i) {
    dn = g[i].x2 - g[i].r - Mby;
    if (dn < distVerlet) {
      neighbourWallB[nNeighWallb] = i;
      ++nNeighWallb;
    }
  }
  // Verlet WallT
  for (i = 0; i < nbgrains; ++i) {
    dn = -g[i].x2 - g[i].r + Mhy;
    if (dn < distVerlet) {
      neighbourWallT[nNeighWallt] = i;
      ++nNeighWallt;
    }
  }
  // Verlet WallL
  for (i = 0; i < nbgrains; ++i) {
    dn = g[i].x1 - g[i].r - Mgx;
    if (dn < distVerlet) {
      neighbourWallL[nNeighWallL] = i;
      ++nNeighWallL;
    }
  }
  // Verlet WallR
  for (i = 0; i < nbgrains; ++i) {
    dn = -g[i].x1 - g[i].r + Mdx;
    if (dn < distVerlet) {
      neighbourWallR[nNeighWallR] = i;
      ++nNeighWallR;
    }
  }
}

// *******************************************************************************************
// * writing obstacle arrays
// *
// *******************************************************************************************
// Debug dumps: obstacle map, active-node map, and nonzero boundary-link
// distances, each to its own text file in the working directory.
void obst_writing()
{
  int x, y, i;
  char filename1[] = "obst_LB.dat";
  FILE* outfile1;
  outfile1 = fopen(filename1, "w");
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      fprintf(outfile1, "%d ", obst[x][y]);
    }
    fprintf(outfile1, "\n");
  }
  fclose(outfile1);
  char filename2[] = "active_nodes.dat";
  FILE* outfile2;
  outfile2 = fopen(filename2, "w");
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      fprintf(outfile2, "%d ", act[x][y]);
    }
    fprintf(outfile2, "\n");
  }
  fclose(outfile2);
  char filename3[] = "links.dat";
  FILE* outfile3;
  outfile3 = fopen(filename3, "w");
  for (y = 0; y < ly; y++) {
    for (x = 0; x < lx; x++) {
      for (i = 1; i < Q; i++) {
        if (delta[x][y][i] != 0) {
          fprintf(outfile3, "%d %d %d %f\n", x, y, i, delta[x][y][i]);
        }
      }
    }
  }
  fclose(outfile3);
}

// ****************************************************************************
// *           Output of results to velocity files                            *
// *           Distribution of verlocity x - y                                *
// ****************************************************************************
// Writes the y-velocity profile along the row of grain 0: the grain velocity
// on solid nodes, the momentum/density ratio on fluid nodes.
void velocity_profile()
{
  int x, y, i;
  real u_y, d_loc;
  real u_y1;
  char filename1[] = "yvel_vs_x.dat";
  FILE* outfile1;
  outfile1 = fopen(filename1, "w");
  fprintf(outfile1, "# vitesse u_x ordonnée \n");
  y = (int)((g[0].x2 - Mby) / dx);
  for (x = 0; x < lx; x++) {
    if (obst[x][y] != -1 && obst[x][y] != nbgrains)
      u_y1 = g[obst[x][y]].v2 / c;
    else {
      u_y = 0;
      d_loc = 0.;
      for (i = 0; i < Q; i++) {
        d_loc = d_loc + f[x][y][i];
      }
      for (i = 0; i < Q; i++) {
        u_y = u_y + f[x][y][i] * ey[i];
      }
      u_y1 = u_y / d_loc;
    }
    fprintf(outfile1, "%d %.10lf\n", x, u_y1);
  }
  fclose(outfile1);
}

//************************************************************
// Calculate pressure *
//************************************************************
// press[x][y] = c_s^2 * (local density - mean density) on fluid nodes,
// zero on solid nodes.
void pressures()
{
  int x, y;
  for (x = 0; x < lx; x++) {
    for (y = 0; y < ly; y++) {
      if (obst[x][y] == -1) {
        press[x][y] = (f[x][y][0] + f[x][y][1] + f[x][y][2] + f[x][y][3] +
                       f[x][y][4] + f[x][y][5] + f[x][y][6] + f[x][y][7] +
                       f[x][y][8] - rho_moy) * c_squ;
      } else
        press[x][y] = 0.;
    }
  }
}

//----------------------------------------------------------
// One coupled time step (despite the GL-style name, this is the main loop
// body): optional wall vibration, LB substep, Verlet refresh, velocity-Verlet
// integration of the grains, and periodic output. Continues on the next line.
void renderScene(void)
{
  long i;
  if (start == 1) {
    if (vib == 1) {
      t = t + dt;
      // Mby=Mby+0.1*amp*sin(freq*t);
      Mgx = Mgx + amp * sin(freq * t);
      Mdx = Mdx + amp * sin(freq * t);
    }
    // FORCES FLUIDES !!!
#ifdef _FLUIDE_
    // The LB fluid is advanced once every npDEM DEM substeps.
    if (nbsteps % npDEM == 0) {
      reinit_obst_density();
      obst_construction();
      collision_streaming();
      if (nbsteps % stepConsole == 0)
        check_density();
      forces_fluid(lx, ly, f, nbgrains, g);
    }
#endif
    if (nbsteps % UpdateVerlet == 0) {
      initVerlet();
      VerletWall();
    }
    /* for(i=0; i<=nbgrains_bas-1; i++) { g[i].a1=0.; g[i].a2=0.; g[i].a3=0.; } */
    // Velocity-Verlet, first half: reset per-step diagnostics, advance
    // positions, and apply the first half-kick to the velocities.
    for (i = 0; i <= nbgrains - 1; i++) {
      g[i].p = 0; // reset pressure
      g[i].s = 0.;
      g[i].ifm = 0;
      g[i].f1 = 0.;
      g[i].f2 = 0.;
      g[i].ice = 0;
      g[i].fr = 0.;
      g[i].slip = 0;
      g[i].rw = 0.;
      ic = 0.;
      g[i].M11 = g[i].M12 = g[i].M21 = g[i].M22 = 0.; // Moments
      g[i].z = 0; // reset coordination numbers
      g[i].zz = 0;
      g[i].x1 = g[i].x1 + dt * g[i].v1 + dt2 * g[i].a1 / 2.;
      g[i].x2 = g[i].x2 + dt * g[i].v2 + dt2 * g[i].a2 / 2.;
      g[i].x3 = g[i].x3 + dt * g[i].v3 + dt2 * g[i].a3 / 2.;
      g[i].v1 = g[i].v1 + dt * g[i].a1 / 2.;
      g[i].v2 = g[i].v2 + dt * g[i].a2 / 2.;
      g[i].v3 = g[i].v3 + dt * g[i].a3 / 2.;
    }
    acceleration_grains();
    // Velocity-Verlet, second half-kick with the new accelerations.
    for (i = 0; i <= nbgrains - 1; i++) {
      // g[i].p=g[i].p/(2.*M_PI*g[i].r); // pressure on grains
      g[i].v1 = g[i].v1 + dt * g[i].a1 / 2.;
      g[i].v2 = g[i].v2 + dt * g[i].a2 / 2.;
      g[i].v3 = g[i].v3 + dt * g[i].a3 / 2.;
    }
    nbsteps++;
  }
  if (nbsteps % stepFilm == 0 && start == 1) {
#ifdef _FLUIDE_
    write_vtk(lx, ly, f, nbgrains, g);
#endif
    nFile++;
  }
  if (nbsteps % stepStrob == 0 && start == 1) {
    write_DEM();
    write_forces();
  }
}

//////////////////////////////////////////////////////////////////////////////
//////////////////////////////// MAIN /////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Entry point: reads the sample file given as argv[1], allocates the
// LBM/DEM arrays, derives the coupled time steps, and runs the simulation
// loop until nbsteps*dt exceeds `duration`. Continues on the next lines.
int main(int argc, char** argv)
{
  time_t time_raw_format;
  struct tm* ptr_time;
  printf("2D LBM-DEM code\n");
  int i;
  real dtmax;
  if (argc != 2) {
    // NOTE(review): the usage string repeats the word "usage".
    printf("usage: usage %s <filename>\n", argv[0]);
    exit(EXIT_FAILURE);
  }
  printf("Opening file : %s\n", argv[1]);
  c_squ = 1.
/ 3.; // D2Q9 lattice sound speed squared
g = read_sample(argv[1]);
check_sample(nbgrains, g);
// Flat allocations, indexed as VLAs by the functions that receive them.
f = malloc(sizeof(real)*lx*ly*Q);
assert(f);
obst = malloc(sizeof(int)*lx*ly);
assert(obst);
act = malloc(sizeof(int)*lx*ly);
assert(act);
delta = malloc(sizeof(real)*lx*ly*Q);
assert(delta);
rLB = malloc(sizeof(real)*nbgrains);
assert(rLB);
cumul = malloc(sizeof(int)*nbgrains);
assert(cumul);
neighbours = malloc(sizeof(int)*nbgrains*6);
assert(neighbours);
neighbourWallB = malloc(sizeof(int)*nbgrains);
assert(neighbourWallB);
neighbourWallR = malloc(sizeof(int)*nbgrains);
assert(neighbourWallR);
neighbourWallL = malloc(sizeof(int)*nbgrains);
assert(neighbourWallL);
neighbourWallT = malloc(sizeof(int)*nbgrains);
assert(neighbourWallT);
memset(cumul, 0, sizeof(int)*nbgrains);
memset(neighbours, 0, sizeof(int)*nbgrains*6);
memset(neighbourWallB, 0, sizeof(int)*nbgrains);
memset(neighbourWallR, 0, sizeof(int)*nbgrains);
memset(neighbourWallL, 0, sizeof(int)*nbgrains);
memset(neighbourWallT, 0, sizeof(int)*nbgrains);
// c1 = malloc(sizeof(struct contact)*nbgrainsMax*nbgrainsMax); assert(c1);
// c2 = malloc(sizeof(struct contact)*nbgrains); assert(c2);
// memset(c1, 0, sizeof(struct contact)*nbgrains*nbgrains);
// memset(c2, 0, sizeof(struct contact)*nbgrains);
fhf = malloc(sizeof(struct force)*nbgrains);
assert(fhf);
fhf1 = malloc(sizeof(real)*nbgrains);
assert(fhf1);
fhf2 = malloc(sizeof(real)*nbgrains);
assert(fhf2);
fhf3 = malloc(sizeof(real)*nbgrains);
assert(fhf3);
init_density(lx, ly, f);
// Domain geometry and gravity components (angleG tilts gravity).
Mgx = 0.;
Mdx = 1.e-3 * lx / 10;
Mhy = 1.e-3 * ly / 10;
Mby = 0.;
xG = -G * sin(angleG);
yG = -G * cos(angleG);
dx = (1./scale) * (Mdx - Mgx) / (lx - 1);
printf("no space %le\n", dx);
// Compute the time step for DEM
real rMin = minimum_grain_radius(nbgrains, g);
// NOTE(review): if iterDEM is an integer type, (1 / iterDEM) is integer
// division (0 for iterDEM > 1) -- confirm iterDEM is declared as a real.
dtmax = (1 / iterDEM) * pi * rMin * sqrt(pi * rhoS / kg);
dtLB = dx * dx * (tau - 0.5) / (3 * nu);
// npDEM = number of DEM substeps per LB step; dt divides dtLB exactly.
npDEM = (dtLB / dtmax + 1);
c = dx / dtLB;
dt = dtLB / npDEM;
dt2 = dt * dt;
printf("dtLB=%le, dtmax=%le, dt=%le, npDEM=%d, c=%lf\n", dtLB, dtmax, dt,
       npDEM, c);
// Grain radii in lattice units (reductionR shrinks the hydrodynamic radius).
for (i = 0; i <= nbgrains - 1; i++) {
  rLB[i] = reductionR * g[i].r / dx;
}
init_obst();
// VerletWall();
time(&time_raw_format);
ptr_time = localtime(&time_raw_format);
printf("Current local time and date: %s", asctime(ptr_time));
// Write the column legend for the statistics file, then close it
// (presumably reopened in append mode by the writers).
char filename_stats[] = "stats.data";
s_stats = fopen(filename_stats, "w");
fprintf(s_stats,
        "#1_t 2_xfront 3_xgrainmax 4_height 5_zmean 6_energie_x 7_energie_y "
        "8_energie_teta 9_energie_cin 10_N0 11_N1 12_N2 13_N3 14_N4 15_N5 "
        "16_energy_Potential 17_Strain_Energy 18_Frictional_Work "
        "19_Internal_Friction 20_Inelastic_Collision 21_Slip "
        "22_Rotational_Work\n");
fclose(s_stats);
start = 1;
// Main simulation loop: one coupled step per iteration until `duration`.
do {
  renderScene();
  time(&time_raw_format);
  ptr_time = localtime(&time_raw_format);
  if (nbsteps % UpdateVerlet == 0)
    printf(
        "steps %li steps %le KE %le PE %le SE %le WF %le INCE %le SLIP %le "
        "RW %le Time %s \n",
        nbsteps, nbsteps * dt, energie_cin, energy_p, SE, WF, INCE, TSLIP,
        TRW, asctime(ptr_time)); // IFR TSE TBW
} while (nbsteps * dt <= duration);
final_density();
time(&time_raw_format);
ptr_time = localtime(&time_raw_format);
printf("End local time and date: %s", asctime(ptr_time));
return 0;
}
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x,
                     struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates the two time planes A[0]/A[1] and the seven
 * coefficient grids, runs the diamond-tiled (PLUTO/CLooG-generated) stencil
 * sweep TESTS times, and reports the per-run and minimum wall time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz are only set when argc > 3 and Nt only when
   * argc > 4; with fewer arguments they are used uninitialized below. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 32;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables (deterministic unless srand is re-enabled)
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public License as
       published by the Free Software Foundation; either version 2.1 of the
       License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation. It must not
       itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header. GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex. If the GCC (4.9 and later) predefined macros specifying
       compiler intent are available, use them to determine whether the
       overall intent is to support these features; otherwise, presume an
       older compiler has intent to support these features and define these
       macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Auto-generated tiled time-space loop nest; A[(t5+1)%2] is written from
     * A[t5%2] (double buffering over the time dimension). Do not hand-edit. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(24*t2-Nz,4)),3*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(12*t1+Ny+21,4)),floord(24*t2+Ny+20,4)),floord(24*t1-24*t2+Nz+Ny+19,4));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32)),ceild(4*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t3+Nx,32),floord(Nt+Nx-4,32)),floord(12*t1+Nx+21,32)),floord(24*t2+Nx+20,32)),floord(24*t1-24*t2+Nz+Nx+19,32));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),4*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),4*t3+2),32*t4+30),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  // (the top-level A/coef pointers and tile_size are not freed; harmless at
  // program exit)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
GB_unop__floor_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__floor_fp64_fp64)
// op(A') function: GB (_unop_tran__floor_fp64_fp64)

// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = floor (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = floor (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = floor (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FLOOR || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = floor (aij) elementwise over anz entries, using OpenMP with
// a static schedule.  Returns GrB_NO_VALUE when this kernel is disabled at
// compile time (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB (_unop_apply__floor_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = floor (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = floor (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared by all unary ops: GB_unop_transpose.c
// expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__floor_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
array_out_bound.c
//#include <omp.h> int main(){ int i = 0; int *p = &i; int a[2]; #pragma omp parallel for for(i=1; i<10; i++){ a[i] = a[*p + i +1]; } }
PrefixScan.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "timer.h" #define MIN(a,b) ((a) < (b) ? (a) : (b)) void PrefixScan (int *input, int *output, int length); int main(int argc, char *argv[]){ #pragma omp parallel if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads()); struct timespec tstart; double time_serial, time_threaded; // large enough to force into main memory #define ARRAY_SIZE 8000000 int *input_serial, *input_threaded, *output_serial, *output_threaded; input_serial = (int *)malloc(ARRAY_SIZE*sizeof(int)); input_threaded = (int *)malloc(ARRAY_SIZE*sizeof(int)); output_serial = (int *)malloc(ARRAY_SIZE*sizeof(int)); output_threaded = (int *)malloc(ARRAY_SIZE*sizeof(int)); for (int i=0; i<ARRAY_SIZE; i++) { input_serial[i] = 1+i%2; } #pragma omp parallel for for (int i=0; i<ARRAY_SIZE; i++) { input_threaded[i] = 1+i%2; } cpu_timer_start(&tstart); PrefixScan(input_serial, output_serial, ARRAY_SIZE); time_serial += cpu_timer_stop(tstart); cpu_timer_start(&tstart); #pragma omp parallel PrefixScan(input_threaded, output_threaded, ARRAY_SIZE); time_threaded = cpu_timer_stop(tstart); printf("Runtime is for serial %lf threaded %lf speedup %lf msecs\n", time_serial, time_threaded, time_serial/time_threaded); free(input_serial); free(input_threaded); free(output_serial); free(output_threaded); } void PrefixScan (int *input, int *output, int length) { // Get the total number of threads and thread_id int nthreads = 1; int thread_id = 0; #ifdef _OPENMP nthreads = omp_get_num_threads(); thread_id = omp_get_thread_num(); #endif // Compute the range for which this thread is responsible. int tbegin = length * ( thread_id ) / nthreads; int tend = length * ( thread_id + 1 ) / nthreads; // Only perform this operation if there is a positive number of entries. 
if ( tbegin < tend ) { // Do an exclusive scan for each thread output[tbegin] = 0; for ( int i = tbegin + 1 ; i < tend ; i++ ) { output[i] = output[i-1] + input[i-1]; } } if (nthreads == 1) return; // Do adjustment to prefix scan for the beginning value for each thread #ifdef _OPENMP // Wait until all threads get here. #pragma omp barrier // On the master thread compute the beginning offset for each thread if (thread_id == 0) { for ( int i = 1 ; i < nthreads ; i ++ ) { int ibegin = length * ( i - 1 ) / nthreads; int iend = length * ( i ) / nthreads; if ( ibegin < iend ) output[iend] = output[ibegin] + input[iend-1]; if ( ibegin < iend - 1 ) output[iend] += output[iend-1]; } } #pragma omp barrier // Start all threads again // Apply the offset to the range for this thread. #pragma omp simd for ( int i = tbegin + 1 ; i < tend ; i++ ) { output[i] += output[tbegin]; } #endif }
spawn_sequential_omp.c
#ifdef HAVE_CONFIG_H # include "config.h" /* for _GNU_SOURCE */ #endif #include <assert.h> #include <stdio.h> #include <omp.h> #include <qthread/qthread.h> #include <qthread/qtimer.h> #define SILENT_ARGPARSING #include "argparsing.h" #include "log.h" static aligned_t null_task(void *args_) { return 0; } int main(int argc, char *argv[]) { uint64_t count = 1048576; unsigned long threads = 1; qtimer_t timer; double total_time = 0.0; CHECK_VERBOSE(); NUMARG(count, "MT_COUNT"); assert(0 != count); #pragma omp parallel #pragma omp single { timer = qtimer_create(); threads = omp_get_num_threads(); qtimer_start(timer); #pragma omp task untied for (uint64_t i = 0; i < count; i++) { #pragma omp task untied null_task(NULL); } #pragma omp taskwait qtimer_stop(timer); } total_time = qtimer_secs(timer); qtimer_destroy(timer); LOG_SPAWN_SEQUENTIAL_YAML(count, total_time) LOG_ENV_OMP_YAML(threads) return 0; } /* vim:set expandtab */
algorithm_engineering.h
#include <fstream>
#include <string>
#include <iostream>
#include <sstream>
#include <cstring>
#include <cstdio>
#include <vector>
#include <math.h>
#include <string>
#include <sys/timeb.h>
#include <ctime>
using namespace std;

// Prunes alignment nodes that cannot be part of a scoring-optimal alignment.
//
// For every node (row, col) it computes, via dynamic programming, the best
// achievable total score of any monotone alignment path forced through that
// node; nodes whose best score falls below `dali_score` are marked 0
// (deleted), all others stay 1.
//
// Parameters:
//   align_edge - 4-D edge scores: align_edge[r1][c1][r2-1][c2-1] scores the
//                pairing (r1+r2, c1+c2) relative to (r1, c1)
//   row_mat, col_mat - currently unused (kept for interface compatibility)
//   nb_row, nb_col   - matrix dimensions; NOTE(review): the DP scratch buffer
//                is fixed at 150x150, so both must be <= 150 — confirm callers
//   dali_score - pruning threshold
// Returns: newly allocated nb_row x nb_col 0/1 matrix; caller owns it and
//   must delete[] each row and then the array itself.
int **algorithm_engineering(double ****align_edge, double **row_mat, double **col_mat, int nb_row, int nb_col, double dali_score)
{
    //time
    //struct timeb startTime, endTime;
    //ftime(&startTime);
    //time
    int **domain = new int *[nb_row];
    int nb_delete(0);
    for (int row = 0; row < nb_row; row++)
    {
        domain[row] = new int[nb_col];
        for (int col = 0; col < nb_col; col++)
        {
            domain[row][col] = 1; // every node starts out "kept"
        }
    }

    // align_node[r][c] = best DP score of the sub-alignment that starts at
    // node (r, c), plus a 0.2 per-node bonus
    double **align_node = new double *[nb_row];
    for (int row = 0; row < nb_row; row++)
    {
        align_node[row] = new double[nb_col];
        for (int col = 0; col < nb_col; col++)
        {
            align_node[row][col] = 0;
        }
    }

    // fixed-size DP scratch matrix; private(dp_mat) gives each thread its own
    double dp_mat[150][150] = {0};

    //--------------------------------------------local dp
    // Phase 1: suffix DP from each anchor (row1, col1) using edge scores.
    #pragma omp parallel for schedule(dynamic) private(dp_mat)
    for (int row1 = 0; row1 < nb_row; row1++)
    {
        for (int col1 = 0; col1 < nb_col; col1++)
        {
            for (int i = 0; i < nb_row - row1; i++)
                dp_mat[i][0] = 0;
            for (int i = 0; i < nb_col - col1; i++)
                dp_mat[0][i] = 0;
            for (int row2 = 1; row2 < nb_row - row1; row2++)
            {
                for (int col2 = 1; col2 < nb_col - col1; col2++)
                {
                    // max of skip-col, skip-row, and take-edge transitions
                    dp_mat[row2][col2] = dp_mat[row2][col2 - 1];
                    if (dp_mat[row2 - 1][col2] > dp_mat[row2][col2])
                        dp_mat[row2][col2] = dp_mat[row2 - 1][col2];
                    double temp = dp_mat[row2 - 1][col2 - 1] + align_edge[row1][col1][row2 - 1][col2 - 1];
                    if (temp > dp_mat[row2][col2])
                        dp_mat[row2][col2] = temp;
                }
            }
            align_node[row1][col1] = dp_mat[nb_row - row1 - 1][nb_col - col1 - 1] + 0.2;
        }
    }
    //--------------------------------------------local dp

    // Phase 2: for each node, best prefix path (part 1) + best suffix path
    // (part 2) over node scores; prune if the sum misses the threshold.
    #pragma omp parallel for schedule(dynamic) private(dp_mat)
    for (int row1 = 0; row1 < nb_row; row1++)
    {
        for (int col1 = 0; col1 < nb_col; col1++)
        {
            double max_score(0);
            //--------------------------------------------part 1
            // best path through nodes strictly above-left of (row1, col1)
            if (row1 > 0 && col1 > 0)
            {
                for (int i = 0; i < row1; i++)
                {
                    dp_mat[i][0] = align_node[i][0];
                }
                for (int i = 0; i < col1; i++)
                    dp_mat[0][i] = align_node[0][i];
                for (int row = 1; row < row1; row++)
                {
                    for (int col = 1; col < col1; col++)
                    {
                        dp_mat[row][col] = dp_mat[row][col - 1];
                        if (dp_mat[row - 1][col] > dp_mat[row][col])
                            dp_mat[row][col] = dp_mat[row - 1][col];
                        double temp = dp_mat[row - 1][col - 1] + align_node[row][col];
                        if (temp > dp_mat[row][col])
                            dp_mat[row][col] = temp;
                    }
                }
                max_score += dp_mat[row1 - 1][col1 - 1];
            }
            //--------------------------------------------part 1
            //--------------------------------------------part 2
            // best path through (row1, col1) and nodes below-right of it
            for (int i = 0; i < nb_row - row1; i++)
                dp_mat[i][0] = align_node[row1 + i][col1];
            for (int i = 0; i < nb_col - col1; i++)
                dp_mat[0][i] = align_node[row1][col1 + i];
            for (int row = 1; row < nb_row - row1; row++)
            {
                for (int col = 1; col < nb_col - col1; col++)
                {
                    dp_mat[row][col] = dp_mat[row][col - 1];
                    if (dp_mat[row - 1][col] > dp_mat[row][col])
                        dp_mat[row][col] = dp_mat[row - 1][col];
                    double temp = dp_mat[row - 1][col - 1] + align_node[row1 + row][col1 + col];
                    if (temp > dp_mat[row][col])
                        dp_mat[row][col] = temp;
                }
            }
            max_score += dp_mat[nb_row - row1 - 1][nb_col - col1 - 1];
            if (max_score < dali_score)
            {
                domain[row1][col1] = 0;
                // BUGFIX: nb_delete++ was a data race across threads (UB);
                // the atomic makes the counter update safe.
                #pragma omp atomic
                nb_delete++;
            }
            //--------------------------------------------part 2
        }
    }

    for (int row1 = 0; row1 < nb_row; row1++)
        delete[] align_node[row1];
    delete[] align_node;

    //cout << "number of nodes deleted: " << nb_delete << endl;
    //ftime(&endTime);
    //cout << "algorithm engineering: " << (endTime.time - startTime.time) * 1000 + (endTime.millitm - startTime.millitm) << endl;
    return domain;
}
rumi-64-192-22r.c
/*
 * Date: 11 December 2015
 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com
 */

/*
 * Simulation of boomerang analysis for Skinny
 * Date: March 21, 2020
 * Author: Hosein Hadipour
 * Contact: hsn.hadipour@gmail.com
 */

#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* FIX: memset was used below without a declaration */
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <stdbool.h>

typedef unsigned long long int UINT64;

// #define DEBUG 1
#define Nthreads 32
#define STEP ((1 << 10) - 1)
#define PROGRAMNUMBER 1

// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40},
                      {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};

// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0  1  2  3
// 4  5  6  7
// 8  9 10 11
//12 13 14 15

// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};

// 8-bit Sbox
const unsigned char sbox_8[256] = {
    0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b,
    0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b,
    0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9,
    0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9,
    0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d,
    0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d,
    0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd,
    0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd,
    0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29,
    0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79,
    0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb,
    0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb,
    0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f,
    0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f,
    0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf,
    0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};

const unsigned char sbox_8_inv[256] = {
    0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee,
    0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4,
    0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf,
    0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7,
    0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4,
    0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde,
    0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7,
    0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf,
    0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4,
    0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce,
    0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7,
    0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef,
    0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4,
    0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe,
    0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7,
    0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};

// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};

// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};

// round constants
const unsigned char RC[62] = {
    0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E,
    0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38,
    0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04,
    0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20};

FILE *fic;

// Seed the C library PRNG with a per-thread offset so concurrent threads do
// not start from identical streams.
// NOTE(review): rand()/srand() share one global state, so calling them from
// multiple OpenMP threads is not thread-safe — consider rand_r / per-thread
// generators if exact reproducibility matters.
void init_prng(int offset)
{
    //int initial_seed = 0x5EC7F2B0;
    //int initial_seed = 0x30051991; My birthday!
    size_t initial_seed = 10*time(NULL) + 11*(size_t)(offset);
    srand(initial_seed); // Initialization, should only be called once.
    int r = rand();
    // FIX: initial_seed is size_t; cast it to match the %llX conversion.
    printf("[+] PRNG initialized to 0x%08llX\n", (unsigned long long)initial_seed);
}

// Print a packed state (block-size bytes) as hex to stdout.
void string_state(unsigned char state[16], int ver)
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        printf("%02x", state[i]);
    }
}

// Print a packed tweakey (key-size bytes) as hex to stdout.
void string_tweak(unsigned char state[16], int ver)
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        printf("%02x", state[i]);
    }
}

// Write a 4x4 cell matrix to the result file, re-packed as bytes
// (two nibbles per byte for the 64-bit versions).
void display_matrix(unsigned char state[4][4], int ver)
{
    int i;
    unsigned char input[16];
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
        for (i = 0; i < 8; i++)
            fprintf(fic, "%02x", input[i]);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
        for (i = 0; i < 16; i++)
            fprintf(fic, "%02x", input[i]);
    }
}

// Write the internal state and all tweakey words to the result file.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // apply the subtweakey to the internal state
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the LFSRs (top two rows only)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }

    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function}
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the LFSRs (rows 2..3 after inverse permutation)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }

    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }

    // apply the subtweakey to the internal state
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}

// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
    state[0][0] ^= (RC[r] & 0xf);
    state[1][0] ^= ((RC[r] >> 4) & 0x3);
    state[2][0] ^= 0x2;
}

// apply the 4-bit Sbox
void SubCell4(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_4[state[i][j]];
}

// apply the 4-bit inverse Sbox
void SubCell4_inv(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_4_inv[state[i][j]];
}

// apply the 8-bit Sbox
void SubCell8(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_8[state[i][j]];
}

// apply the 8-bit inverse Sbox
void SubCell8_inv(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_8_inv[state[i][j]];
}

// Apply the ShiftRows function
void ShiftRows(unsigned char state[4][4])
{
    int i, j, pos;
    unsigned char state_tmp[4][4];
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            //application of the ShiftRows permutation
            pos = P[j + 4 * i];
            state_tmp[i][j] = state[pos >> 2][pos & 0x3];
        }
    }
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] = state_tmp[i][j];
        }
    }
}

// Apply the inverse ShiftRows function
void ShiftRows_inv(unsigned char state[4][4])
{
    int i, j, pos;
    unsigned char state_tmp[4][4];
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            //application of the inverse ShiftRows permutation
            pos = P_inv[j + 4 * i];
            state_tmp[i][j] = state[pos >> 2][pos & 0x3];
        }
    }
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] = state_tmp[i][j];
        }
    }
}

// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
    int j;
    unsigned char temp;
    for (j = 0; j < 4; j++)
    {
        state[1][j] ^= state[2][j];
        state[2][j] ^= state[0][j];
        state[3][j] ^= state[2][j];
        temp = state[3][j];
        state[3][j] = state[2][j];
        state[2][j] = state[1][j];
        state[1][j] = state[0][j];
        state[0][j] = temp;
    }
}

// Apply the inverse linear diffusion matrix
void MixColumn_inv(unsigned char state[4][4])
{
    int j;
    unsigned char temp;
    for (j = 0; j < 4; j++)
    {
        temp = state[3][j];
        state[3][j] = state[0][j];
        state[0][j] = state[1][j];
        state[1][j] = state[2][j];
        state[2][j] = temp;
        state[3][j] ^= state[2][j];
        state[2][j] ^= state[0][j];
        state[1][j] ^= state[2][j];
    }
}

// decryption function of Skinny: decrypts `input` (in place) under `userkey`
// for `r` rounds of version `ver`.
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack ciphertext and tweakey into 4x4 cell matrices
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }

    // fast-forward the tweakey schedule to the final round (AddKey on a
    // dummy state only advances the key schedule)
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }

#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // run the rounds backwards
    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }

#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // repack the state into the caller's buffer
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}

// encryption function of Skinny: encrypts `input` (in place) under `userkey`
// for `r` rounds of version `ver`.
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack plaintext and tweakey into 4x4 cell matrices
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }

#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    } //The last subtweakey should not be added

#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}

// generate test vectors for all the versions of Skinny
// (uses a hard-coded 10-round enc/dec and writes to the global file `fic`,
// which must already be open)
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        dec(c, k, ver, 10);
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}

// Run N3 boomerang queries for one randomly drawn key quartet
// (k1, k1^dk1, k1^dk2, k1^dk1^dk2) and return how many right quartets
// (plaintext difference dp reproduced after the enc/xor/dec round trip)
// were observed.
int boomerang(int r, int ver, unsigned long long N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    int i;
    unsigned char p1[16], p2[16];
    unsigned char p1_old[16], p2_old[16];
    unsigned char c3_old[16], c4_old[16];
    unsigned char c3[16], c4[16];
    unsigned char k1[48], k2[48], k3[48], k4[48];
    // randomly choose k1
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k1[i] = rand() & 0xff;
    // derive k2
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k2[i] = k1[i] ^ dk1[i];
    // derive k3
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k3[i] = k1[i] ^ dk2[i];
    // derive k4
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k4[i] = k2[i] ^ dk2[i];
    int num = 0;
    for (UINT64 t = 0; t < N3; t++)
    {
        // randomly choose p1
        for (i = 0; i < (versions[ver][0] >> 3); i++)
        {
            p1[i] = rand() & 0xff;
            p1_old[i] = p1[i];
        }
        // derive p2
        for (i = 0; i < (versions[ver][0] >> 3); i++)
        {
            p2[i] = p1[i] ^ dp[i];
            p2_old[i] = p2[i];
        }
        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);
        // derive c3
        for (i = 0; i < (versions[ver][0] >> 3); i++)
        {
            c3[i] = p1[i] ^ dc[i];
            c3_old[i] = c3[i];
        }
        // derive c4
        for (i = 0; i < (versions[ver][0] >> 3); i++)
        {
            c4[i] = p2[i] ^ dc[i];
            c4_old[i] = c4[i];
        }
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        // right quartet iff p3 ^ p4 equals the input difference dp
        bool flag = 1;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = 0;
        if (flag)
        {
            num++;
            printf("%s\n", "A right quartet found :)\n");
            printf("p1: ");
            string_state(p1_old, ver);
            printf("\n");
            printf("p2: ");
            string_state(p2_old, ver);
            printf("\n");
            printf("p3: ");
            string_state(c3, ver);
            printf("\n");
            printf("p4: ");
            string_state(c4, ver);
            printf("\n");
            printf("c1: ");
            string_state(p1, ver);
            printf("\n");
            printf("c2: ");
            string_state(p2, ver);
            printf("\n");
            printf("c3: ");
            string_state(c3_old, ver);
            printf("\n");
            printf("c4: ");
            string_state(c4_old, ver);
            printf("\n");
            printf("k1: ");
            string_tweak(k1, ver);
            printf("\n");
            printf("k2: ");
            string_tweak(k2, ver);
            printf("\n");
            printf("k3: ");
            string_tweak(k3, ver);
            printf("\n");
            printf("k4: ");
            string_tweak(k4, ver);
            printf("\n");
        }
    }
    return num;
}

// Launch N1 OpenMP threads, each running N2 bunches of N3 boomerang queries,
// and return the total number of right quartets over all threads.
// NOTE(review): NUM is indexed by omp_get_thread_num(), which assumes the
// team really has N1 threads (omp_set_num_threads(N1) requests this) —
// confirm on the deployment runtime.
UINT64 send_boomerangs(int R, int ver, int N1, UINT64 N2, UINT64 N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    UINT64 *NUM;
    NUM = (UINT64*)malloc(N1 * sizeof(UINT64));
    int counter;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %llu * %llu = 2^(%f)\n",\
    N1, N2, N3, (double)(log(N1 * N2 * N3) / log(2)));
    printf("#Queries per thread = (#Bunches per thread) * (#Queries per bunch) = %llu * %llu = 2^(%f)\n", N2, N3, (double)(log(N2 * N3) / log(2)));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
    #pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID);
        for (UINT64 j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
            if ((j & STEP) == 0){
                printf("PID: %d \t Bunch Number: %llu/%llu\n", ID, j, N2);
            }
        }
        NUM[ID] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    UINT64 sum = 0;
    UINT64 sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    free(NUM);
    printf("sum = %llu\n", sum);
    // NOTE(review): if sum == 0 this divides by zero and log() misbehaves;
    // the experiment sizes used here are expected to produce hits — confirm.
    sum_temp = (double)(N1 * N2 * N3) / (double)sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}

// Parse a hex string into block-size bytes (state difference).
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // BUGFIX: the buffer passed to strtol must be NUL-terminated;
        // the previous 2-byte buffer caused strtol to read past the end
        // (undefined behavior, possibly consuming extra "digits").
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}

// Parse a hex string into key-size bytes (tweakey difference).
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // BUGFIX: same NUL-termination fix as in convert_hexstr_to_statearray.
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}

int main()
{
    // init_prng(atoi(argv[1]));
    unsigned char dp[16];
    unsigned char dc[16];
    unsigned char dk1[48];
    unsigned char dk2[48];
    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int n = 1;   // Number of independent experiments
    int R = 22;  // Number of rounds
    int ver = 2; // Determine the version:
    // [0 = Skinny-64-64]
    // [1 = Skinny-64-128]
    // [2 = Skinny-64-192]
    // [3 = Skinny-128-128]
    // [4 = Skinny-128-256]
    // [5 = Skinny-128-384]
    char dp_str[] = "0000000000000200";
    char dc_str[] = "5605060000450605";
    char dk1_str[] = "0000000001000000000000000B0000000000000008000000";
    char dk2_str[] = "000000000020000000000000003000000000000000D00000";
    // Expected probability: 2^(-38.84)
    // #######################################################################################################
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads;              // Number of parallel threads : N1
    int deg1 = 15;
    int deg2 = 20;
    UINT64 N2 = (UINT64)1 << deg1;  // Number of bunches per thread: N2 = 2^(deg)
    UINT64 N3 = (UINT64)1 << deg2;  // Number of queries per bunch: N3
    //################### Number of total queries : N1*N2*N3 ###############
    UINT64 sum = 0;
    for (int i = 0; i < n; i++)
    {
        sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
    }
    char name[30];
    sprintf(name, "result_%d.txt", PROGRAMNUMBER);
    fic = fopen(name, "w");
    fprintf(fic, "\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
    fclose(fic);
    printf("Program number = %d", PROGRAMNUMBER);
    printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
    // sum = (double)(n * N1 * N2 * N3) / sum;
    // printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
    char keyboard_input;
    keyboard_input = getchar();
    return 0;
}

//g++ rumi-64-192-22r.c -o rumi-64-192-22r -fopenmp -O3 --std=c++11
/* ==== fx.c (next file in this bundle) ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. 
*/

/*
  Single-byte opcodes substituted for multi-character operators so the fx
  expression parser only ever has to look at one character at a time.  The
  values sit above 0xf4 so they cannot collide with printable expression text.
*/
#define LeftShiftOperator  0xf5U
#define RightShiftOperator  0xf6U
#define LessThanEqualOperator  0xf7U
#define GreaterThanEqualOperator  0xf8U
#define EqualOperator  0xf9U
#define NotEqualOperator  0xfaU
#define LogicalAndOperator  0xfbU
#define LogicalOrOperator  0xfcU
#define ExponentialNotation  0xfdU

/* Evaluation state for one fx expression. */
struct _FxInfo
{
  const Image
    *images;            /* image sequence the expression is applied to */

  char
    *expression;        /* normalized expression text (see AcquireFxInfo) */

  FILE
    *file;              /* destination for debug() output */

  SplayTreeInfo
    *colors,            /* cache of parsed color names */
    *symbols;           /* cache of per-image channel statistics */

  CacheView
    **view;             /* one virtual cache view per image in the list */

  RandomInfo
    *random_info;       /* PRNG for rand()-style expression terms */

  ExceptionInfo
    *exception;         /* errors raised while evaluating */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A c q u i r e F x I n f o                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireFxInfo() allocates the FxInfo structure.
%
%  The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(Image *images,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o expression: the expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* One virtual cache view per image in the sequence. */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  /*
    Force right-to-left associativity for unary negation: rewrite "-x" as
    "-1.0*x", then undo the rewrite where "-" was really part of "^-", "E-"
    or "e-" (exponents and scientific notation).  Order matters here.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple operators: each two-character operator becomes
    the single-byte opcode defined at the top of this file.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A d d N o i s e I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AddNoiseImage() adds random noise to the image.
%
%  The format of the AddNoiseImage method is:
%
%      Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
%        const double attenuate,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
%      Impulse, Laplacian, or Poisson.
%
%    o attenuate: attenuate the random distribution.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type, const double attenuate,ExceptionInfo *exception) { #define AddNoiseImageTag "AddNoise/Image" CacheView *image_view, *noise_view; Image *noise_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } /* Add noise in each row. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,noise_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel); if ((traits == UndefinedPixelTrait) || (noise_traits == UndefinedPixelTrait)) continue; if ((noise_traits & CopyPixelTrait) != 0) { SetPixelChannel(noise_image,channel,p[i],q); continue; } SetPixelChannel(noise_image,channel,ClampToQuantum( GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)), q); } p+=GetPixelChannels(image); q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AddNoiseImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); 
random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u e S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlueShiftImage() mutes the colors of the image to simulate a scene at % nighttime in the moonlight. % % The format of the BlueShiftImage method is: % % Image *BlueShiftImage(const Image *image,const double factor, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o factor: the shift factor. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlueShiftImage(const Image *image,const double factor, ExceptionInfo *exception) { #define BlueShiftImageTag "BlueShift/Image" CacheView *image_view, *shift_view; Image *shift_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate blue shift image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); shift_image=CloneImage(image,0,0,MagickTrue,exception); if (shift_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse) { shift_image=DestroyImage(shift_image); return((Image *) NULL); } /* Blue-shift DirectClass image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); shift_view=AcquireAuthenticCacheView(shift_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,shift_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; Quantum quantum; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) < quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) < quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum); pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum); pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum); quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) > quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) > quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(pixel.red+factor*quantum); pixel.green=0.5*(pixel.green+factor*quantum); pixel.blue=0.5*(pixel.blue+factor*quantum); SetPixelRed(shift_image,ClampToQuantum(pixel.red),q); SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q); SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q); p+=GetPixelChannels(image); q+=GetPixelChannels(shift_image); } sync=SyncCacheViewAuthenticPixels(shift_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,BlueShiftImageTag,progress++, image->rows); if (proceed 
== MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); shift_view=DestroyCacheView(shift_view); if (status == MagickFalse) shift_image=DestroyImage(shift_image); return(shift_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a r c o a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CharcoalImage() creates a new image that is a copy of an existing one with % the edge highlighted. It allocates the memory necessary for the new Image % structure and returns a pointer to the new image. % % The format of the CharcoalImage method is: % % Image *CharcoalImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edge_image,
    *sketch_image,
    *working_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Charcoal pipeline: clone -> edge detect -> blur -> normalize -> negate ->
    grayscale.  Each intermediate image is released as soon as the next stage
    has consumed it.
  */
  working_image=CloneImage(image,0,0,MagickTrue,exception);
  if (working_image == (Image *) NULL)
    return((Image *) NULL);
  edge_image=EdgeImage(working_image,radius,exception);
  working_image=DestroyImage(working_image);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  sketch_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (sketch_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(sketch_image,exception);
  (void) NegateImage(sketch_image,MagickFalse,exception);
  (void) GrayscaleImage(sketch_image,image->intensity,exception);
  return(sketch_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C o l o r i z e I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *blend,
%        const PixelInfo *colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend: A character string indicating the level of blending as a
%      percentage.
%
%    o colorize: A color value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"
/* Linear blend of one channel: blend_percentage percent colorize. */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* A gray image can't hold a color fill: promote to sRGB first. */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse the blend geometry: rho applies to all color channels unless
    sigma/xi/psi/chi override green/blue/alpha (and black for CMYK).
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ColorizeImageTag,progress++,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r M a t r i x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorMatrixImage() applies color transformation to an image. This method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the ColorMatrixImage method is: % % Image *ColorMatrixImage(const Image *image, % const KernelInfo *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_matrix: the color matrix. % % o exception: return any errors or warnings in this structure. 
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
   That should be provided in "matrix.c"
   (ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /* Identity transform; overwritten below from the user-supplied kernel. */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant.  Kernel
    entries outside the 6x6 window are ignored.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /* Log the effective 6x6 matrix, one formatted row per line. */
      char
        format[MagickPathExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        /*
          Row v of the matrix produces one output channel: dot product of
          (R,G,B,K,A) plus a normalized constant offset in column 5.
        */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    view;

  /* Release members, then the per-image cache views in reverse order. */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  view=(ssize_t) GetImageListLength(fx_info->images);
  while (--view >= 0)
    fx_info->view[view]=DestroyCacheView(fx_info->view[view]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      double FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,Exceptioninfo *exception)
%      double FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
% */ static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent], statistic[MagickPathExtent]; const char *value; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1UL << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*StringToDouble(value,(char **) NULL)); } (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",minima); } if 
(LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g", standard_deviation); } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MagickPathExtent]; const char *p, *value; Image *image; MagickBooleanType status; PixelInfo pixel; double alpha, beta; PointInfo point; register ssize_t i; size_t level; p=expression; 
  /*
    Default to the current image; then strip any leading image selector
    ("s", "u", "v", "u[expr]") and pixel-addressing prefix ("p{x,y}" for
    absolute, "p[dx,dy]" for relative coordinates) off the symbol.
  */
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* bracketed image index: copy the [...] body and evaluate it */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /* p{x,y}: absolute pixel coordinates */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                /* p[dx,dy]: offsets relative to the current pixel */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetPixelInfo(image,&pixel);
  /* sample the (possibly fractional) pixel through the per-image view */
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /*
    A longer symbol may name a color constant (e.g. "red.g"); try the color
    cache first, then query the color by name and cache it.
  */
  if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      (void) CopyMagickString(name,p,MagickPathExtent);
      /* trim a trailing ".channel" suffix (but stop at a ')') */
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,ConstantString(
                    name),ClonePixelInfo(&pixel));
                  p+=strlen(name);
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /* bare image/pixel reference: yield the requested channel's value */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case IndexPixelChannel:
          return(0.0);
        case IntensityPixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Dispatch on the first letter of the symbol: channel letters, image
    attributes, and statistics.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /* "channel(r,g,b,...)": pick the geometry field for this channel */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              /*
                NOTE(review): this non-CMYK path maps black->chi and
                alpha->psi, the reverse of the CMYK path above -- verify
                this asymmetry is intentional.
              */
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /* "image.*" statistics: strip the "image." prefix (6 chars) */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /* fall back to a user-defined symbol set by a prior '=' assignment */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}

/*
  FxOperatorPrecedence() scans `expression' and returns a pointer to the
  operator the expression should be split at (the one with the weakest
  binding found at nesting level zero), or NULL when no operator is found.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  c=(-1);  /* c tracks the previous significant character */
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) ||
        (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /* skip over function names whose spelling embeds operator characters */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if
        (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
        /* no break: falls through to the 'J' case (harmless here, since
           "j0"/"j1" cannot match a string starting with 'E'/'e') */
      }
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hexadecimal color constant: skip its digits */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /* implied multiplication, e.g. "2r" or ")(": previous char ends a
             value and the current char begins one */
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) !=
                 (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) ==
               (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* only a binary +/- (not a sign) participates in precedence */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}

/*
  FxEvaluateSubexpression() recursively evaluates an FX (sub)expression for
  the given channel and pixel position; the primary result is returned and a
  secondary value (e.g. the right operand) is stored in *beta.
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  const char *expression,const size_t depth,double *beta,
  ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
  subexpression=DestroyString(subexpression); \
  return(value); \
}

  char
    *q,
    *subexpression;

  double
    alpha,
    gamma;

  register const char
    *p;

  *beta=0.0;
  subexpression=AcquireString(expression);
  *subexpression='\0';
  /* guard against runaway recursion on pathological expressions */
  if (depth > FxMaxSubexpressionDepth)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",expression);
      FxReturn(0.0);
    }
  if (exception->severity >= ErrorException)
    FxReturn(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    FxReturn(0.0);
  /* split at the weakest-binding operator and recurse on the left side */
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
        beta,exception);
      switch ((unsigned char) *p)
      {
        case '~':
{ *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); FxReturn(0.0); } FxReturn(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=fabs(floor((*beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); FxReturn(0.0); } FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) 
((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta, exception); FxReturn(gamma); } case '=': { char numeric[MagickPathExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); (void) FormatLocaleString(numeric,MagickPathExtent,"%.20g",*beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); (void) CopyMagickString(subexpression,expression+1,MagickPathExtent); if (strlen(subexpression) != 0) subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { 
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (LocaleNCompare(expression,"clamp",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: 
type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="opacity"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="opacity"; break; default: type="unknown"; break; } *subexpression='\0'; if (strlen(expression) > 6) (void) CopyMagickString(subexpression,expression+6, MagickPathExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (LocaleNCompare(expression,"erf",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (LocaleNCompare(expression,"floor",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } break; } case 'G': case 'g': { if (LocaleNCompare(expression,"gauss",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI); FxReturn(gamma); } 
if (LocaleNCompare(expression,"gcd",3) == 0) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); FxReturn((double) gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleNCompare(expression,"hypot",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleNCompare(expression,"int",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (LocaleNCompare(expression,"isnan",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (LocaleNCompare(expression,"j0",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"j1",2) == 0) { 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"jinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha)); FxReturn(gamma); } #endif break; } case 'L': case 'l': { if (LocaleNCompare(expression,"ln",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (LocaleNCompare(expression,"logtwo",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (LocaleNCompare(expression,"log",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (LocaleNCompare(expression,"max",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (LocaleNCompare(expression,"min",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (LocaleNCompare(expression,"mod",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gamma=alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta); FxReturn(gamma); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (LocaleNCompare(expression,"not",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (LocaleNCompare(expression,"pow",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (LocaleNCompare(expression,"rand",4) == 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (LocaleNCompare(expression,"round",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) 
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleNCompare(expression,"sign",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"sinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); gamma=sin((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma); } if (LocaleNCompare(expression,"sinh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (LocaleNCompare(expression,"sin",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (LocaleNCompare(expression,"sqrt",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (LocaleNCompare(expression,"squish",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (LocaleNCompare(expression,"tanh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (LocaleNCompare(expression,"tan",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (LocaleNCompare(expression,"trunc",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) 
            FxReturn(floor(alpha));
          /* trunc() of a negative value rounds toward zero, i.e. ceil(). */
          FxReturn(ceil(alpha));
        }
      if (LocaleCompare(expression,"t") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /*
            Re-evaluate the subexpression until its condition part goes to
            (near) zero; the loop's value is the last right-hand result in
            *beta.
          */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
              depth+1,beta,exception);
          } while (fabs(alpha) >= MagickEpsilon);
          FxReturn(*beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    default:
      break;
  }
  subexpression=DestroyString(subexpression);
  /*
    Not a known function or constant: try to parse a numeric literal with an
    SI prefix; if no characters are consumed, fall back to symbol lookup.
  */
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
  FxReturn(alpha);
}

/*
  FxEvaluateExpression(): evaluate the Fx expression at pixel (0,0) of the
  gray channel; convenience wrapper around FxEvaluateChannelExpression().
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  return(status);
}

/*
  FxPreprocessExpression(): evaluate the expression once (at pixel 0,0) to
  surface syntax errors early.  fx_info->file is temporarily cleared and
  restored afterward — presumably to suppress debug output during this dry
  run; TODO confirm against the FxInfo definition.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=file;
  return(status);
}

MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel
channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta;

  /*
    Evaluate the parsed Fx expression for one channel at pixel (x,y); the
    result is stored in *alpha.  Returns MagickFalse only if an OptionError
    was raised (i.e. the expression itself is invalid).
  */
  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Destroy the per-thread FxInfo array: one slot per ThreadResource limit,
  NULL slots are skipped (partial construction is possible).
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  Build one FxInfo per worker thread so each OpenMP thread evaluates the
  expression with private state.  An expression beginning with '@' names a
  file whose contents are the real expression.  Returns NULL on any failure
  (allocation, parse error), after releasing everything built so far.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  double
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  /* Zero the array so DestroyFxThreadSet() can cope with partial fill. */
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;
    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* Dry-run the expression once so syntax errors abort the whole set. */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}

/*
  FxImage(): evaluate the Fx expression at every pixel/channel of a clone of
  the input image.  A NULL expression degenerates to a plain clone.  Returns
  NULL on allocation or evaluation failure.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private FxInfo */

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            /* Channel is copy-only: pass the source value through. */
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        /* Expression result is normalized [0,1]; scale to quantum range. */
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress++ is shared across OpenMP threads without
          atomic/critical protection, so reported progress is approximate.
        */
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I m p l o d e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing
%  one with the image pixels "implode" by the specified percentage.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is implode.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount: Define the extent of the implosion.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: normalize the shorter axis so the implosion
    ellipse becomes a circle of the given radius in scaled coordinates.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the implosion circle: copy source channels unchanged. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: pull the sample point toward (or away from)
            the center by a distance-dependent factor, then interpolate.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x/scale.x+center.x),
            (double) (factor*delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress++ unsynchronized across threads. */
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress++,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImage method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames:  Define the number of in-between image to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *MorphImages(const Image *image,const size_t number_frames, ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t n; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (n=1; n < (ssize_t) number_frames; n++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (n=0; n < (ssize_t) number_frames; n++) { CacheView *image_view, *morph_view; beta=(double) (n+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta* GetNextImageInList(next)->rows+0.5),next->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } status=SetImageStorageClass(morph_image,DirectClass,exception); if (status == MagickFalse) { morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++) { PixelChannel channel = 
GetPixelChannelChannel(morph_image,i); PixelTrait traits = GetPixelChannelTraits(morph_image,channel); PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel); if ((traits == UndefinedPixelTrait) || (morph_traits == UndefinedPixelTrait)) continue; if ((morph_traits & CopyPixelTrait) != 0) { SetPixelChannel(morph_image,channel,p[i],q); continue; } SetPixelChannel(morph_image,channel,ClampToQuantum(alpha* GetPixelChannel(morph_images,channel,q)+beta*p[i]),q); } p+=GetPixelChannels(morph_image); q+=GetPixelChannels(morph_images); } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (n < (ssize_t) number_frames) break; /* Clone last frame in sequence. */ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. 
%
%  The format of the PlasmaImage method is:
%
%      MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
%        size_t attenuate,size_t depth,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o segment:   Define the region to apply plasma fractals values.
%
%    o attenuate: Define the plasma attenuation factor.
%
%    o depth: Limit the plasma recursion depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return the pixel value perturbed by uniform noise in [-noise/2,+noise/2],
  clamped to the quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  Quantum
    plasma;

  plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  if (plasma <= 0)
    return((Quantum) 0);
  if (plasma >= QuantumRange)
    return(QuantumRange);
  return(plasma);
}

/*
  Recursive worker for PlasmaImage(): while depth > 0, split the segment
  into quadrants and recurse (attenuation grows with each level); at depth 0,
  set the edge midpoints and center to noisy averages of the corner pixels.
  Returns MagickTrue when the segment is small enough to stop subdividing.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  Noise amplitude shrinks as attenuate
    grows with recursion depth.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /*
        NOTE(review): this guard mixes x1/x_mid with y2/y_mid; by symmetry
        with the left/right branch one would expect x1/x_mid || x2/x_mid —
        confirm against upstream before changing.
      */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel: noisy average of the two opposite corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  PlasmaImage(): public entry point; sets up cache views and a private
  random source, then delegates to PlasmaImageProxy().
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* NOTE(review): this debug-log check duplicates the one above. */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l a r o i d I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const char *caption,const double angle,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o caption: the Polaroid caption.
%
%    o angle: Apply the effect along this angle.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The border width is 1/25th of the longest image dimension, but never less
    than 10 pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /*
            Word-wrap the caption, then size the caption image to the number
            of wrapped lines (count+1) times the line height.
          */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Frame the picture: border-colored canvas with the image inset by quantum
    pixels, caption (if any) composited below it.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Rotate 90 degrees, wave horizontally, rotate back: applies the "bend"
    along the vertical axis.  Each stage consumes picture_image and replaces
    it with the new result.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Add a drop shadow, flop it so the shadow falls on the expected side, then
    composite the picture on top of its shadow.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returned NULL */
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returned NULL */
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Tilt the final composite and trim away the transparent margin.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.  Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      /*
        Map intensity to a sepia ramp: highlights clip to white; red is lifted
        the most, green by 7/6 of the threshold, blue is darkened.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Floor green/blue at threshold/7 to keep shadows warm.  NOTE(review):
        these reads use the freshly written sepia pixel (q) but pass `image`
        as the context image — confirm this is intentional.
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /*
    Stretch and boost contrast to finish the darkroom look.
  */
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d o w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadowImage() simulates a shadow from the specified image and returns it.
%
%  The format of the ShadowImage method is:
%
%      Image *ShadowImage(const Image *image,const double alpha,
%        const double sigma,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha: percentage transparency.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x_offset: the shadow x-offset.
%
%    o y_offset: the shadow y-offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Pad the clone with a transparent border wide enough (2*sigma) for the
    blurred shadow edge to fall inside the canvas.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: flatten every pixel to the background color, scaling its
    alpha by the source pixel's alpha times the requested opacity.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the silhouette, then restore the
    original channel mask.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  /*
    Shift the page geometry so the shadow lands at the requested offset,
    compensating for the border padding added above.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.
We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SketchImage() selects a suitable radius for you.  Angle gives the angle
%  of the sketch.
%
%  The format of the SketchImage method is:
%
%    Image *SketchImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the
%      center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image: fill a double-sized canvas with per-pixel random gray noise
    (the raw material for the pencil strokes).
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when the random secret key is ~0UL (reproducible seed);
    otherwise row order would change the random stream.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      register ssize_t
        i;

      /*
        One random value per pixel, written to every defined channel (gray).
      */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      random_image=DestroyImage(random_image);
      return(random_image);  /* DestroyImage() returned NULL */
    }
  /*
    Streak the noise along the sketch angle, extract edges, then invert and
    halve it to produce the pencil-stroke overlay.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image,exception);
  (void) NegateImage(dodge_image,MagickFalse,exception);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  /*
    Color-dodge the strokes onto the original, then blend 20% of the original
    color back in (compose:args "20x80") to tint the sketch.
  */
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S o l a r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SolarizeImage() applies a special effect to the image, similar to the effect
%  achieved in a photo darkroom by selectively exposing areas of photo
%  sensitive paper to light.  Threshold ranges from 0 to QuantumRange and is a
%  measure of the extent of the solarization.
%
%  The format of the SolarizeImage method is:
%
%      MagickBooleanType SolarizeImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: Define the extent of the solarization.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap: invert any colormap entry brighter than the
        threshold.  The pixel loop below then updates the stored indexes'
        channel values as well.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: invert, in place, every updatable channel value that
    exceeds the threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove that the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark.
%
%  The format of the SteganoImage method is:
%
%      Image *SteganoImage(const Image *image,const Image *watermark,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o watermark: the watermark image.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelInfo pixel; register Quantum *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse) { stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } /* Hide watermark in low-order bits of image. 
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=stegano_image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { ssize_t offset; (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel, exception); offset=k/(ssize_t) stegano_image->columns; if (offset >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (Quantum *) NULL) break; switch (c) { case 0: { SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 1: { SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 2: { SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == stegano_image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y 
p h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StereoAnaglyphImage() combines two images and produces a single image that
%  is the composite of a left and right image of a stereo pair.  Special
%  red-green stereo glasses are required to view this effect.
%
%  The format of the StereoAnaglyphImage method is:
%
%      Image *StereoImage(const Image *left_image,const Image *right_image,
%        ExceptionInfo *exception)
%      Image *StereoAnaglyphImage(const Image *left_image,
%        const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o left_image: the left image.
%
%    o right_image: the right image.
%
%    o exception: return any errors or warnings in this structure.
%
%    o x_offset: amount, in pixels, by which the left image is offset to the
%      right of the right image.
%
%    o y_offset: amount, in pixels, by which the left image is offset to the
%      bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /* Convenience wrapper: anaglyph with no left-image offset. */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}

MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag  "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
    NOTE(review): a pixel-access failure breaks out of the loop with status
    still MagickTrue, so a partially filled image can be returned — confirm
    this matches the intended error contract.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    /* The left image is sampled shifted by (-x_offset,-y_offset). */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S w i r l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SwirlImage() swirls the pixels about the center of the image, where
%  degrees indicates the sweep of the arc through which each pixel is moved.
%  You get a more dramatic effect as the degrees move from 1 to 360.
%
%  The format of the SwirlImage method is:
%
%      Image *SwirlImage(const Image *image,double degrees,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag  "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  double
    radius;

  Image
    *canvas_image,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: the anisotropic scale maps the image onto a unit
    circle so the swirl is confined to the largest inscribed ellipse.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          register ssize_t
            i;

          /*
            Outside the ellipse: copy the source pixel through unchanged.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation falls off quadratically with distance
            from the center, so the twist is strongest at the middle.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress++,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the image.  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
%     Image *TintImage(const Image *image,const char *blend,
%       const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o blend: A color value used for tinting.
%
%   o tint: A color value used for tinting.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag  "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /*
    A non-gray tint applied to a grayscale image forces the clone into sRGB
    so the tint can actually introduce color.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /*
    With no blend geometry there is nothing to do: return the plain clone.
  */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Scale each channel weight by the tint color and re-center on the overall
    tint intensity so the vector vanishes for a neutral tint.
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      /*
        Weight f(x)=1-4*(x-1/2)^2 peaks at mid-tone and vanishes at black
        and white, so shadows and highlights are left nearly untouched.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);  /* alpha is not tinted */
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%      Image *VignetteImage(const Image *image,const double radius,
%        const double sigma,const ssize_t x,const ssize_t y,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  /*
    A black canvas with a white filled ellipse becomes the vignette mask.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  /*
    Draw the white ellipse that defines the vignette window, blur it for a
    soft fall-off, then composite it onto the canvas as an intensity mask.
  */
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  /*
    Flatten against the background so the masked edges blend away.
  */
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,
      exception);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength is specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%      Image *WaveImage(const Image *image,const double amplitude,
%        const double wave_length,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length: Define the amplitude and wave length of the
%      sine wave.
%
%    o interpolate: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag  "Wave/Image"

  CacheView
    *canvas_image_view,
    *wave_view;

  Image
    *canvas_image,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  /*
    The destination is taller by 2*|amplitude| so the shifted rows fit.
  */
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map.
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the per-column vertical displacement, biased by |amplitude|
    so the offset is always non-negative.
  */
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /*
    Samples that fall outside the canvas resolve to the background color.
  */
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /*
        Sample the source at (x, y-sine_map[x]).
      */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(canvas_image,WaveImageTag,progress++,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e l e t   D e n o i s e   I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%  WaveletDenoiseImage() removes noise from the image using a wavelet
%  transform.  The wavelet transform is a fast hierarchical scheme for
%  processing an image using a set of consecutive lowpass and high_pass
%  filters, followed by a decimation.  This results in a decomposition into
%  different scales which can be regarded as different "frequency bands",
%  determined by the mother wavelet.  Adapted from dcraw.c by David Coffin.
%
%  The format of the WaveletDenoiseImage method is:
%
%      Image *WaveletDenoiseImage(const Image *image,const double threshold,
%        const double softness,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: set the threshold for smoothing.
%
%    o softness: attenuate the smoothing threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  One level of the a-trous "hat" wavelet transform along one dimension:
  kernel[i]=0.25*(2*pixels[i]+pixels[i-scale]+pixels[i+scale]), with the
  out-of-range taps mirrored at both boundaries.  'stride' selects the
  direction: 1 for rows, the row width for columns.
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;

  register ssize_t
    i;

  /*
    Leading edge: the i-scale tap reflects back into the range (q walks
    backwards from pixels[scale]).
  */
  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /*
    Interior: both taps are in range.
  */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /*
    Trailing edge: the i+scale tap reflects about the last element (r walks
    backwards from pixels[extent-2]).
  */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}

MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  /*
    Expected detail-coefficient noise magnitude per decomposition level.
  */
  static const float
    noise_levels[] = {
      0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f, 0.0080f,
      0.0044f };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    3 planes: the working channel plus two scratch planes that ping-pong as
    the low-pass output between levels.  The kernel scratch is per-thread.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,
    image->columns)+1,GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    PixelChannel
      pixel_channel;

    PixelTrait
      traits;

    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    /*
      Only the RGB channels are denoised; alpha and metadata pass through.
    */
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel.  The detail kernel has high
      values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /*
        Alternate between the two scratch planes on each level.
      */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
      /*
        Separable hat transform: filter rows, then columns, doubling the
        filter scale (1UL << level) at each decomposition level.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.  Coefficients above +/-magnitude
        are soft-shrunk toward zero; the rest are scaled by 'softness'.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /*
          Accumulate the thresholded detail back into the base plane.
        */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        offset;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;

        /*
          Final value = accumulated details + residual low-pass plane.
        */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress is reported under the AddNoise tag here --
           confirm this is intentional rather than a WaveletDenoise tag. */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
sections.c
/*
  Minimal OpenMP "sections" demo: each section runs exactly once, executed
  by whichever thread of the parallel team picks it up first, so the output
  order and thread ids vary between runs.

  Build and run:
    $ gcc -fopenmp -O2 src/sections.c -o bin/sections
    $ export OMP_NUM_THREADS=4
    $ ./bin/sections
    En funcB: esta sección la ejecuta el thread 3
    En funcA: esta sección la ejecuta el thread 2
*/
#include <stdio.h>
#include <omp.h>

/* Report which thread executed section A. */
void funcA() {
  printf("En funcA: esta sección la ejecuta el thread %d\n",
         omp_get_thread_num());
}

/* Report which thread executed section B. */
void funcB() {
  printf("En funcB: esta sección la ejecuta el thread %d\n",
         omp_get_thread_num());
}

/* Fix: the original declared `main()` with implicit int, which is invalid
   in C99/C11 (and in C++); use the standard signature and return 0. */
int main(void) {
  #pragma omp parallel
  {
    #pragma omp sections
    {
      #pragma omp section
      (void) funcA();

      #pragma omp section
      (void) funcB();
    }
  }
  return 0;
}
kernel_iq.c
/*
##########################################################
#                                                        #
#   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!   #
#   !!                                              !!   #
#   !!  KEEP THIS CODE CONSISTENT WITH KERNELPY.PY  !!   #
#   !!                                              !!   #
#   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!   #
#                                                        #
##########################################################
*/

// NOTE: the following macros are defined in generate.py:
//
//  MAX_PD : the maximum number of dispersity loops allowed for this model,
//      which will be at most modelinfo.MAX_PD.
//  NUM_PARS : the number of parameters in the parameter table
//  NUM_VALUES : the number of values to skip at the start of the
//      values array before you get to the dispersity values.
//  PARAMETER_TABLE : list of parameter declarations used to create the
//      ParameterTable type.
//  KERNEL_NAME : model_Iq, model_Iqxy or model_Imagnetic.  This code is
//      included three times, once for each kernel type.
//  MAGNETIC : defined when the magnetic kernel is being instantiated
//  NUM_MAGNETIC : the number of magnetic parameters
//  MAGNETIC_PARS : a comma-separated list of indices to the sld
//      parameters in the parameter table.
//  TRANSLATION_VARS(table) : series of intermediate expressions used to
//      compute parameter substitions when reparameterizing a model.
//  VALID(table) : test if the current point is feasible to calculate.
//  CALL_VOLUME(form, shell, table) : assign form and shell values.
//  CALL_RADIUS_EFFECTIVE(mode, table) : call the R_eff function.
//  CALL_IQ(q, table) : call the Iq function for 1D calcs.
//  CALL_IQ_A(q, table) : call the Iq function with |q| for 2D data.
//  CALL_FQ(q, F1, F2, table) : call the Fq function for 1D calcs.
//  CALL_FQ_A(q, F1, F2, table) : call the Iq function with |q| for 2D data.
//  CALL_IQ_AC(qa, qc, table) : call the Iqxy function for symmetric shapes
//  CALL_IQ_ABC(qa, qc, table) : call the Iqxy function for asymmetric shapes
//  CALL_IQ_XY(qx, qy, table) : call the Iqxy function for arbitrary models
//  PROJECTION : equirectangular=1, sinusoidal=2
//      see explore/jitter.py for definitions.

#ifndef _PAR_BLOCK_ // protected block so we can include this code twice.
#define _PAR_BLOCK_

// Bookkeeping for walking the polydispersity (dispersity) hypercube.
typedef struct {
#if MAX_PD > 0
    int32_t pd_par[MAX_PD];     // id of the nth dispersity variable
    int32_t pd_length[MAX_PD];  // length of the nth dispersity weight vector
    int32_t pd_offset[MAX_PD];  // offset of pd weights in the value & weight vector
    int32_t pd_stride[MAX_PD];  // stride to move to the next index at this level
#endif // MAX_PD > 0
    int32_t num_eval;           // total number of voxels in hypercube
    int32_t num_weights;        // total length of the weights vector
    int32_t num_active;         // number of non-trivial pd loops
    int32_t theta_par;          // id of first orientation variable
} ProblemDetails;

// Intel HD 4000 needs private arrays to be a multiple of 4 long
typedef struct {
    PARAMETER_TABLE
} ParameterTable;
typedef union {
    ParameterTable table;
    double vector[4*((NUM_PARS+3)/4)];
} ParameterBlock;
#endif // _PAR_BLOCK_

#if defined(MAGNETIC) && NUM_MAGNETIC > 0
// ===== Helper functions for magnetism =====

// vector algebra on double[3] vectors
void SET_VEC(double *vector, double v0, double v1, double v2)
{
    vector[0] = v0;
    vector[1] = v1;
    vector[2] = v2;
}

void SCALE_VEC(double *vector, double a)
{
    vector[0] = a*vector[0];
    vector[1] = a*vector[1];
    vector[2] = a*vector[2];
}

void ADD_VEC(double *result_vec, double *vec1, double *vec2)
{
    result_vec[0] = vec1[0] + vec2[0];
    result_vec[1] = vec1[1] + vec2[1];
    result_vec[2] = vec1[2] + vec2[2];
}

// dot product
static double SCALAR_VEC( double *vec1, double *vec2)
{
    return vec1[0] * vec2[0] + vec1[1] * vec2[1] + vec1[2] * vec2[2];
}

// Euclidean length
static double MAG_VEC( double *vec)
{
    return sqrt(SCALAR_VEC(vec,vec));
}

// Component of vec1 perpendicular to vec2 (Gram-Schmidt step).
void ORTH_VEC(double *result_vec, double *vec1, double *vec2)
{
    double scale = SCALAR_VEC(vec1,vec2) / SCALAR_VEC(vec2,vec2);
    result_vec[0] = vec1[0] - scale * vec2[0];
    result_vec[1] = vec1[1] - scale * vec2[1];
    result_vec[2] = vec1[2] - scale * vec2[2];
}

// Return value restricted between low and high
static double clip(double value, double low, double high)
{
    return (value < low ? low : (value > high ? high : value));
}

// Compute spin cross sections given in_spin and out_spin
// To convert spin cross sections to sld b:
//     uu * (sld - m_perp_x);
//     dd * (sld + m_perp_x);
//     ud * (m_perp_y - 1j*m_perp_z);
//     du * (m_perp_y + 1j*m_perp_z);
// (x,y,z) is a local magnetic coordinate system.  m_perp_x denotes the
// magnetic scattering vector along the polarisation and m_perp_y the
// component along the scattering vector; m_perp_z is orthogonal to the
// others.
// weights for spin cross sections: dd, du real, ud real, uu, du imag, ud imag
static void set_spin_weights(double in_spin, double out_spin, double weight[6])
{
    double norm;
    in_spin = clip(in_spin, 0.0, 1.0);
    out_spin = clip(out_spin, 0.0, 1.0);
    // Previous version of this function took the square root of the weights,
    // under the assumption that
    //
    //     w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...)
    //
    // However, since the weights are applied to the final intensity and
    // are not interned inside the I(q) function, we want the full
    // weight and not the square root.  Anyway no function will ever use
    // set_spin_weights as part of calculating an amplitude, as the weights
    // are related to polarisation efficiency of the instrument.  The weights
    // serve to construct various magnetic scattering cross sections, which
    // are linear combinations of the spin-resolved cross sections.  The
    // polarisation efficiency e_in and e_out are parameters ranging from
    // 0.5 (unpolarised) beam to 1 (perfect optics).  For in_spin or
    // out_spin <0.5 one assumes a CS, where the spin is reversed/flipped
    // with respect to the initial supermirror polariser.  The actual
    // polarisation efficiency in this case is however
    // e_in/out = 1-in/out_spin.
    if (out_spin < 0.5){norm=1-out_spin;}
    else{norm=out_spin;}

    // The norm is needed to make sure that the scattering cross sections are
    // correctly weighted, such that the sum of spin-resolved measurements
    // adds up to the unpolarised or half-polarised scattering cross section.
    // No intensity weighting needed on the incoming polariser side (assuming
    // that a user has normalised to the incoming flux with polariser in for
    // SANSpol and unpolarised beam, respectively).
    weight[0] = (1.0-in_spin) * (1.0-out_spin) / norm; // dd
    weight[1] = (1.0-in_spin) * out_spin / norm;       // du
    weight[2] = in_spin * (1.0-out_spin) / norm;       // ud
    weight[3] = in_spin * out_spin / norm;             // uu
    weight[4] = weight[1]; // du.imag
    weight[5] = weight[2]; // ud.imag
}

// Compute the magnetic sld for one spin cross section
static double mag_sld(
    // xs selects the cross section:
    const unsigned int xs, // 0=dd, 1=du.real, 2=ud.real, 3=uu, 4=du.imag, 5=ud.imag
    const double qx, const double qy,
    const double px, const double py, const double pz,
    const double sld,
    const double mx, const double my, const double mz
)
{
    double Mvector[3];
    double Pvector[3];
    double qvector[3];
    double rhom[3];
    double Mperp[3];
    // NOTE(review): despite the name, qsq is |q| (used to normalise qvector),
    // not q squared.
    const double qsq = sqrt(qx*qx + qy*qy);
    SET_VEC(qvector, qx / qsq, qy / qsq, 0);
    SET_VEC(Mvector, mx, my, mz);
    SET_VEC(Pvector, px, py, pz);
    // Mperp: component of the magnetisation perpendicular to q (the only
    // part that scatters neutrons).
    ORTH_VEC(Mperp, Mvector, qvector);

    if (xs < 4) {
        switch (xs) {
          default: // keep compiler happy; condition ensures xs in [0,1,2,3]
          case 0: // dd => sld - D Pvector \cdot Mperp
              return sld - SCALAR_VEC(Pvector, Mperp);;
          case 1: // du.real => |MperpP - (MperpP \cdot qvector) qvector|
                  // where MperpP is Mperp orthogonalised against Pvector
              ORTH_VEC(rhom, Mperp, Pvector);
              ORTH_VEC(rhom, rhom, qvector);
              return MAG_VEC(rhom);
          case 2: // ud.real => length of vector MperpPperpQ
              ORTH_VEC(rhom, Mperp, Pvector);
              ORTH_VEC(rhom, rhom, qvector);
              return MAG_VEC(rhom);
          case 3: // uu => sld + D Pvector \cdot Mperp
              return sld + SCALAR_VEC(Pvector, Mperp);
        }
    } else {
        if (xs== 4) {
            ORTH_VEC(rhom, Mperp, Pvector);
            return - SCALAR_VEC(rhom, qvector); // du.imag => - i MperpP \cdot qvector
        } else { // index == 5
            ORTH_VEC(rhom, Mperp, Pvector);
            return + SCALAR_VEC(rhom, qvector); // ud.imag => + i MperpP \cdot qvector
        }
    }
}

#endif

// ===== Helper functions for orientation and jitter =====

// To change the definition of the angles, run explore/angles.py, which
// uses sympy to generate the equations.

#if !defined(_QAC_SECTION) && defined(CALL_IQ_AC)
#define _QAC_SECTION

typedef struct {
    double R31, R32;
} QACRotation;

// Fill in the rotation matrix R from the view angles (theta, phi) and the
// jitter angles (dtheta, dphi).  This matrix can be applied to all of the
// (qx, qy) points in the image to produce R*[qx,qy]' = [qa,qc]'
static void
qac_rotation(
    QACRotation *rotation,
    double theta, double phi,
    double dtheta, double dphi)
{
    double sin_theta, cos_theta;
    double sin_phi, cos_phi;

    // reverse view matrix
    SINCOS(theta*M_PI_180, sin_theta, cos_theta);
    SINCOS(phi*M_PI_180, sin_phi, cos_phi);
    const double V11 = cos_phi*cos_theta;
    const double V12 = sin_phi*cos_theta;
    const double V21 = -sin_phi;
    const double V22 = cos_phi;
    const double V31 = sin_theta*cos_phi;
    const double V32 = sin_phi*sin_theta;

    // reverse jitter matrix
    SINCOS(dtheta*M_PI_180, sin_theta, cos_theta);
    SINCOS(dphi*M_PI_180, sin_phi, cos_phi);
    const double J31 = sin_theta;
    const double J32 = -sin_phi*cos_theta;
    const double J33 = cos_phi*cos_theta;

    // reverse matrix: only the bottom row is needed to recover qc
    rotation->R31 = J31*V11 + J32*V21 + J33*V31;
    rotation->R32 = J31*V12 + J32*V22 + J33*V32;
}

// Apply the rotation matrix returned from qac_rotation to the point (qx,qy),
// returning R*[qx,qy]' = [qa,qc]'
static void
qac_apply(
    QACRotation *rotation,
    double qx, double qy,
    double *qab_out, double *qc_out)
{
    // Indirect calculation of qab, from qab^2 = |q|^2 - qc^2
    const double dqc = rotation->R31*qx + rotation->R32*qy;
    const double dqab_sq = -dqc*dqc + qx*qx + qy*qy;
//*qab_out = sqrt(fabs(dqab_sq)); *qab_out = dqab_sq > 0.0 ? sqrt(dqab_sq) : 0.0; *qc_out = dqc; } #endif // _QAC_SECTION #if !defined(_QABC_SECTION) && defined(CALL_IQ_ABC) #define _QABC_SECTION typedef struct { double R11, R12; double R21, R22; double R31, R32; } QABCRotation; // Fill in the rotation matrix R from the view angles (theta, phi, psi) and the // jitter angles (dtheta, dphi, dpsi). This matrix can be applied to all of the // (qx, qy) points in the image to produce R*[qx,qy]' = [qa,qb,qc]' static void qabc_rotation( QABCRotation *rotation, double theta, double phi, double psi, double dtheta, double dphi, double dpsi) { double sin_theta, cos_theta; double sin_phi, cos_phi; double sin_psi, cos_psi; // reverse view matrix SINCOS(theta*M_PI_180, sin_theta, cos_theta); SINCOS(phi*M_PI_180, sin_phi, cos_phi); SINCOS(psi*M_PI_180, sin_psi, cos_psi); const double V11 = -sin_phi*sin_psi + cos_phi*cos_psi*cos_theta; const double V12 = sin_phi*cos_psi*cos_theta + sin_psi*cos_phi; const double V21 = -sin_phi*cos_psi - sin_psi*cos_phi*cos_theta; const double V22 = -sin_phi*sin_psi*cos_theta + cos_phi*cos_psi; const double V31 = sin_theta*cos_phi; const double V32 = sin_phi*sin_theta; // reverse jitter matrix SINCOS(dtheta*M_PI_180, sin_theta, cos_theta); SINCOS(dphi*M_PI_180, sin_phi, cos_phi); SINCOS(dpsi*M_PI_180, sin_psi, cos_psi); const double J11 = cos_psi*cos_theta; const double J12 = sin_phi*sin_theta*cos_psi + sin_psi*cos_phi; const double J13 = sin_phi*sin_psi - sin_theta*cos_phi*cos_psi; const double J21 = -sin_psi*cos_theta; const double J22 = -sin_phi*sin_psi*sin_theta + cos_phi*cos_psi; const double J23 = sin_phi*cos_psi + sin_psi*sin_theta*cos_phi; const double J31 = sin_theta; const double J32 = -sin_phi*cos_theta; const double J33 = cos_phi*cos_theta; // reverse matrix rotation->R11 = J11*V11 + J12*V21 + J13*V31; rotation->R12 = J11*V12 + J12*V22 + J13*V32; rotation->R21 = J21*V11 + J22*V21 + J23*V31; rotation->R22 = J21*V12 + J22*V22 + J23*V32; 
rotation->R31 = J31*V11 + J32*V21 + J33*V31;
    rotation->R32 = J31*V12 + J32*V22 + J33*V32;
}

// Apply the rotation matrix returned from qabc_rotation to the point (qx,qy),
// returning R*[qx,qy]' = [qa,qb,qc]'
static void
qabc_apply(
    QABCRotation *rotation,
    double qx, double qy,
    double *qa_out, double *qb_out, double *qc_out)
{
    *qa_out = rotation->R11*qx + rotation->R12*qy;
    *qb_out = rotation->R21*qx + rotation->R22*qy;
    *qc_out = rotation->R31*qx + rotation->R32*qy;
}
#endif // _QABC_SECTION

// ==================== KERNEL CODE ========================

// Main scattering kernel.  Walks a slice [pd_start, pd_stop) of the
// dispersity mesh, accumulating the weighted scattering into result[].
// Result layout (visible from the writes at the end of this function):
//   CALL_FQ:  result[0..2*nq) holds (F2,F1) pairs, then result[2*nq..2*nq+3]
//             holds weight_norm, weighted_form, weighted_shell, weighted_radius.
//   others:   result[0..nq) holds F2 values, then result[nq..nq+3] holds
//             the same four tallies.
kernel
void KERNEL_NAME(
    int32_t nq,                   // number of q values
    const int32_t pd_start,       // where we are in the dispersity loop
    const int32_t pd_stop,        // where we are stopping in the dispersity loop
    pglobal const ProblemDetails *details,
    pglobal const double *values, // parameter values and distributions
    pglobal const double *q,      // nq q values, with padding to boundary
    pglobal double *result,       // nq+1 return values, again with padding
    const double cutoff,          // cutoff in the dispersity weight product
    int32_t radius_effective_mode // which effective radius to compute
    )
{
#if defined(USE_GPU)
  // who we are and what element we are working with
  #if defined(USE_OPENCL)
  const int q_index = get_global_id(0);
  #else // USE_CUDA
  const int q_index = threadIdx.x + blockIdx.x * blockDim.x;
  #endif
  if (q_index >= nq) return;
#else
  // Define q_index here so that debugging statements can be written to work
  // for both OpenCL and DLL using:
  //    if (q_index == 0) {printf(...);}
  int q_index = 0;
#endif

  // ** Fill in the local values table **
  // Storage for the current parameter values.
  // These will be updated as we walk the dispersity mesh.
  ParameterBlock local_values;
  // values[0] is scale, values[1] is background; model parameters start at [2].
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i=0; i < NUM_PARS; i++) {
    local_values.vector[i] = values[2+i];
    //if (q_index==0) printf("p%d = %g\n",i, local_values.vector[i]);
  }
  //if (q_index==0) printf("NUM_VALUES:%d NUM_PARS:%d MAX_PD:%d\n", NUM_VALUES, NUM_PARS, MAX_PD);
  //if (q_index==0) printf("start:%d stop:%d\n", pd_start, pd_stop);

  // ** Precompute magnetism values **
#if defined(MAGNETIC) && NUM_MAGNETIC>0
  // Location of the sld parameters in the parameter vector.
  // These parameters are updated with the effective sld due to magnetism.
  const int32_t slds[] = { MAGNETIC_PARS };

  // Interpret polarization cross section.
  //   up_frac_i = values[NUM_PARS+2];
  //   up_frac_f = values[NUM_PARS+3];
  //   up_angle  = values[NUM_PARS+4];
  //   up_phi    = values[NUM_PARS+5];
  // TODO: could precompute more magnetism parameters before calling the kernel.
  double xs_weights[8];  // uu, ud real, du real, dd, ud imag, du imag, fill, fill
  double cos_mspin, sin_mspin;
  double cos_mphi, sin_mphi;
  set_spin_weights(values[NUM_PARS+2], values[NUM_PARS+3], xs_weights);
  SINCOS(values[NUM_PARS+4]*M_PI_180, sin_mspin, cos_mspin);
  SINCOS(values[NUM_PARS+5]*M_PI_180, sin_mphi, cos_mphi);
#endif // MAGNETIC

  // ** Fill in the initial results **
  // If pd_start is zero that means that we are starting a new calculation,
  // and must initialize the result to zero.  Otherwise, we are restarting
  // the calculation from somewhere in the middle of the dispersity mesh,
  // and we update the value rather than reset it.  Similarly for the
  // normalization factor, which is stored as the final value in the
  // results vector (one past the number of q values).
  //
  // The code differs slightly between opencl and dll since opencl is only
  // seeing one q value (stored in the variable "this_F2") while the dll
  // version must loop over all q.
#if defined(CALL_FQ)
  double weight_norm = (pd_start == 0 ? 0.0 : result[2*nq]);
  double weighted_form = (pd_start == 0 ? 0.0 : result[2*nq+1]);
  double weighted_shell = (pd_start == 0 ? 0.0 : result[2*nq+2]);
  double weighted_radius = (pd_start == 0 ? 0.0 : result[2*nq+3]);
#else
  double weight_norm = (pd_start == 0 ? 0.0 : result[nq]);
  double weighted_form = (pd_start == 0 ? 0.0 : result[nq+1]);
  double weighted_shell = (pd_start == 0 ? 0.0 : result[nq+2]);
  double weighted_radius = (pd_start == 0 ? 0.0 : result[nq+3]);
#endif
#if defined(USE_GPU)
  #if defined(CALL_FQ)
  double this_F2 = (pd_start == 0 ? 0.0 : result[2*q_index+0]);
  double this_F1 = (pd_start == 0 ? 0.0 : result[2*q_index+1]);
  #else
  double this_F2 = (pd_start == 0 ? 0.0 : result[q_index]);
  #endif
#else // !USE_GPU
  if (pd_start == 0) {
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    #if defined(CALL_FQ)
    // 2*nq for F^2,F pairs
    for (int q_index=0; q_index < 2*nq; q_index++) result[q_index] = 0.0;
    #else
    for (int q_index=0; q_index < nq; q_index++) result[q_index] = 0.0;
    #endif
  }
  //if (q_index==0) printf("start %d %g %g\n", pd_start, pd_norm, result[0]);
#endif // !USE_GPU

// ====== macros to set up the parts of the loop =======
/*
Based on the level of the loop, uses C preprocessor magic to construct
level-specific looping variables, including these from loop level 3:

  int n3 : length of loop for mesh level 3
  int i3 : current position in the loop for level 3, which is calculated
       from a combination of pd_start, pd_stride[3] and pd_length[3].
  int p3 : is the index into the parameter table for mesh level 3
  double v3[] : pointer into dispersity array to values for loop 3
  double w3[] : pointer into dispersity array to weights for loop 3
  double weight3 : the product of weights from levels 3 and up, computed
       as weight5*weight4*w3[i3].  Note that we need an outermost value
       weight5 set to 1.0 for this to work properly.

After expansion, the loop structure will look like the following:

  // --- PD_INIT(4) ---
  const int n4 = pd_length[4];
  const int p4 = pd_par[4];
  pglobal const double *v4 = pd_value + pd_offset[4];
  pglobal const double *w4 = pd_weight + pd_offset[4];
  int i4 = (pd_start/pd_stride[4])%n4;  // position in level 4 at pd_start

  // --- PD_INIT(3) ---
  const int n3 = pd_length[3];
  ...
  int i3 = (pd_start/pd_stride[3])%n3;  // position in level 3 at pd_start

  PD_INIT(2)
  PD_INIT(1)
  PD_INIT(0)

  // --- PD_OUTERMOST_WEIGHT(5) ---
  const double weight5 = 1.0;

  // --- PD_OPEN(4,5) ---
  while (i4 < n4) {
    parameter[p4] = v4[i4];  // set the value for pd parameter 4 at this mesh point
    const double weight4 = w4[i4] * weight5;

    // from PD_OPEN(3,4)
    while (i3 < n3) {
      parameter[p3] = v3[i3];  // set the value for pd parameter 3 at this mesh point
      const double weight3 = w3[i3] * weight4;

      PD_OPEN(2,3)
      PD_OPEN(1,2)
      PD_OPEN(0,1)

      // ... main loop body ...
      APPLY_PROJECTION    // convert jitter values to spherical coords
      BUILD_ROTATION      // construct the rotation matrix qxy => qabc
      for each q
          FETCH_Q         // set qx,qy from the q input vector
          APPLY_ROTATION  // convert qx,qy to qa,qb,qc
          CALL_KERNEL     // F2 = Iqxy(qa, qb, qc, p1, p2, ...)

      ++step;  // increment counter representing position in dispersity mesh

      PD_CLOSE(0)
      PD_CLOSE(1)
      PD_CLOSE(2)

      // --- PD_CLOSE(3) ---
      if (step >= pd_stop) break;
      ++i3;
    }
    i3 = 0; // reset loop counter for next round through the loop

    // --- PD_CLOSE(4) ---
    if (step >= pd_stop) break;
    ++i4;
  }
  i4 = 0; // reset loop counter even though no more rounds through the loop
*/

// ** prepare inner loops **

// Depending on the shape type (radial, axial, triaxial), the variables
// and calling parameters in the loop body will be slightly different.
// Macros capture the differences in one spot so the rest of the code
// is easier to read.  The code below both declares variables for the
// inner loop and defines the macros that use them.

#if defined(CALL_FQ) // COMPUTE_F1_F2 is true
  // unoriented 1D returning <F> and <F^2>
  // Note that F1 and F2 are returned from CALL_FQ by reference, and the
  // user of the CALL_KERNEL macro below is assuming that F1 and F2 are defined.
  double qk;
  double F1, F2;
  #define FETCH_Q() do { qk = q[q_index]; } while (0)
  #define BUILD_ROTATION() do {} while(0)
  #define APPLY_ROTATION() do {} while(0)
  #define CALL_KERNEL() CALL_FQ(qk,F1,F2,local_values.table)

#elif defined(CALL_FQ_A)
  // unoriented 2D return <F> and <F^2>
  // Note that the CALL_FQ_A macro is computing _F1_slot and _F2_slot by
  // reference then returning _F2_slot.  We are calling them _F1_slot and
  // _F2_slot here so they don't conflict with _F1 and _F2 in the macro
  // expansion, or with the use of F2 = CALL_KERNEL() when it is used below.
  double qx, qy;
  double _F1_slot, _F2_slot;
  #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0)
  #define BUILD_ROTATION() do {} while(0)
  #define APPLY_ROTATION() do {} while(0)
  #define CALL_KERNEL() CALL_FQ_A(sqrt(qx*qx+qy*qy),_F1_slot,_F2_slot,local_values.table)

#elif defined(CALL_IQ)
  // unoriented 1D return <F^2>
  double qk;
  #define FETCH_Q() do { qk = q[q_index]; } while (0)
  #define BUILD_ROTATION() do {} while(0)
  #define APPLY_ROTATION() do {} while(0)
  #define CALL_KERNEL() CALL_IQ(qk,local_values.table)

#elif defined(CALL_IQ_A)
  // unoriented 2D
  double qx, qy;
  #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0)
  #define BUILD_ROTATION() do {} while(0)
  #define APPLY_ROTATION() do {} while(0)
  #define CALL_KERNEL() CALL_IQ_A(sqrt(qx*qx+qy*qy), local_values.table)

#elif defined(CALL_IQ_AC)
  // oriented symmetric 2D
  double qx, qy;
  #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0)
  double qa, qc;
  QACRotation rotation;
  // theta, phi, dtheta, dphi are defined below in projection to avoid repeated code.
  #define BUILD_ROTATION() qac_rotation(&rotation, theta, phi, dtheta, dphi);
  #define APPLY_ROTATION() qac_apply(&rotation, qx, qy, &qa, &qc)
  #define CALL_KERNEL() CALL_IQ_AC(qa, qc, local_values.table)

#elif defined(CALL_IQ_ABC)
  // oriented asymmetric 2D
  double qx, qy;
  #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0)
  double qa, qb, qc;
  QABCRotation rotation;
  // theta, phi, dtheta, dphi are defined below in projection to avoid repeated code.
  // psi and dpsi are only for IQ_ABC, so they are processed here.
  const double psi = values[details->theta_par+4];
  local_values.table.psi = 0.;
  #define BUILD_ROTATION() qabc_rotation(&rotation, theta, phi, psi, dtheta, dphi, local_values.table.psi)
  #define APPLY_ROTATION() qabc_apply(&rotation, qx, qy, &qa, &qb, &qc)
  #define CALL_KERNEL() CALL_IQ_ABC(qa, qb, qc, local_values.table)

#elif defined(CALL_IQ_XY)
  // direct call to qx,qy calculator
  double qx, qy;
  #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0)
  #define BUILD_ROTATION() do {} while(0)
  #define APPLY_ROTATION() do {} while(0)
  #define CALL_KERNEL() CALL_IQ_XY(qx, qy, local_values.table)
#endif

// Define APPLY_PROJECTION depending on model symmetries.  We do this outside
// the previous if block so that we don't need to repeat the identical
// logic in the IQ_AC and IQ_ABC branches.  This will become more important
// if we implement more projections, or more complicated projections.
#if defined(CALL_IQ) || defined(CALL_IQ_A) || defined(CALL_FQ) || defined(CALL_FQ_A)
  // no orientation
  #define APPLY_PROJECTION() const double weight=weight0
#elif defined(CALL_IQ_XY) // pass orientation to the model
  // CRUFT: support oriented model which define Iqxy rather than Iqac or Iqabc
  // Need to plug the values for the orientation angles back into parameter
  // table in case they were overridden by the orientation offset.  This
  // means that orientation dispersity will not work for these models, but
  // it was broken anyway, so no matter.  Still want to provide Iqxy in case
  // the user model wants full control of orientation/magnetism.
  #if defined(HAVE_PSI)
    const double theta = values[details->theta_par+2];
    const double phi = values[details->theta_par+3];
    const double psi = values[details->theta_par+4];
    double weight;
    #define APPLY_PROJECTION() do { \
      local_values.table.theta = theta; \
      local_values.table.phi = phi; \
      local_values.table.psi = psi; \
      weight=weight0; \
    } while (0)
  #elif defined(HAVE_THETA)
    const double theta = values[details->theta_par+2];
    const double phi = values[details->theta_par+3];
    double weight;
    #define APPLY_PROJECTION() do { \
      local_values.table.theta = theta; \
      local_values.table.phi = phi; \
      weight=weight0; \
    } while (0)
  #else
    #define APPLY_PROJECTION() const double weight=weight0
  #endif
#else // apply jitter and view before calling the model
  // Grab the "view" angles (theta, phi, psi) from the initial parameter table.
  const double theta = values[details->theta_par+2];
  const double phi = values[details->theta_par+3];
  // Make sure jitter angle defaults to zero if there is no jitter distribution
  local_values.table.theta = 0.;
  local_values.table.phi = 0.;
  // The "jitter" angles (dtheta, dphi, dpsi) are stored with the
  // dispersity values and copied to the local parameter table as
  // we go through the mesh.
  double dtheta, dphi, weight;
  #if PROJECTION == 1 // equirectangular
    #define APPLY_PROJECTION() do { \
      dtheta = local_values.table.theta; \
      dphi = local_values.table.phi; \
      weight = fabs(cos(dtheta*M_PI_180)) * weight0; \
    } while (0)
  #elif PROJECTION == 2 // sinusoidal
    #define APPLY_PROJECTION() do { \
      dtheta = local_values.table.theta; \
      dphi = local_values.table.phi; \
      weight = weight0; \
      if (dtheta != 90.0) dphi /= cos(dtheta*M_PI_180); \
      else if (dphi != 0.0) weight = 0.; \
      if (fabs(dphi) >= 180.) weight = 0.; \
    } while (0)
  #endif
#endif // done defining APPLY_PROJECTION

// ** define looping macros **

// Define looping variables
#define PD_INIT(_LOOP) \
  const int n##_LOOP = details->pd_length[_LOOP]; \
  const int p##_LOOP = details->pd_par[_LOOP]; \
  pglobal const double *v##_LOOP = pd_value + details->pd_offset[_LOOP]; \
  pglobal const double *w##_LOOP = pd_weight + details->pd_offset[_LOOP]; \
  int i##_LOOP = (pd_start/details->pd_stride[_LOOP])%n##_LOOP;

// Jump into the middle of the dispersity loop
#define PD_OPEN(_LOOP,_OUTER) \
  while (i##_LOOP < n##_LOOP) { \
    local_values.vector[p##_LOOP] = v##_LOOP[i##_LOOP]; \
    const double weight##_LOOP = w##_LOOP[i##_LOOP] * weight##_OUTER;

// create the variable "weight#=1.0" where # is the outermost level+1 (=MAX_PD).
#define _PD_OUTERMOST_WEIGHT(_n) const double weight##_n = 1.0;
#define PD_OUTERMOST_WEIGHT(_n) _PD_OUTERMOST_WEIGHT(_n)

// Close out the loop
#define PD_CLOSE(_LOOP) \
    if (step >= pd_stop) break; \
    ++i##_LOOP; \
  } \
  i##_LOOP = 0;

// ====== construct the loops =======

// Pointers to the start of the dispersity and weight vectors, if needed.
#if MAX_PD>0
  pglobal const double *pd_value = values + NUM_VALUES;
  pglobal const double *pd_weight = pd_value + details->num_weights;
#endif

// The variable "step" is the current position in the dispersity loop.
// It will be incremented each time a new point in the mesh is accumulated,
// and used to test whether we have reached pd_stop.
int step = pd_start;

// *** define loops for each of 0, 1, 2, ..., modelinfo.MAX_PD-1 ***

// define looping variables
#if MAX_PD>4
  PD_INIT(4)
#endif
#if MAX_PD>3
  PD_INIT(3)
#endif
#if MAX_PD>2
  PD_INIT(2)
#endif
#if MAX_PD>1
  PD_INIT(1)
#endif
#if MAX_PD>0
  PD_INIT(0)
#endif

// open nested loops
PD_OUTERMOST_WEIGHT(MAX_PD)
#if MAX_PD>4
  PD_OPEN(4,5)
#endif
#if MAX_PD>3
  PD_OPEN(3,4)
#endif
#if MAX_PD>2
  PD_OPEN(2,3)
#endif
#if MAX_PD>1
  PD_OPEN(1,2)
#endif
#if MAX_PD>0
  PD_OPEN(0,1)
#endif

//if (q_index==0) {printf("step:%d of %d, pars:",step,pd_stop); for (int i=0; i < NUM_PARS; i++) printf("p%d=%g ",i, local_values.vector[i]); printf("\n");}

// ====== loop body =======
// Note: recalc all intermediates for each loop, because we don't know order
TRANSLATION_VARS(local_values.table);
if (VALID(local_values.table)) {
  APPLY_PROJECTION();

  // Accumulate I(q)
  // Note: weight==0 must always be excluded
  if (weight > cutoff) {
    double form, shell;
    CALL_VOLUME(form, shell, local_values.table);
    weight_norm += weight;
    weighted_form += weight * form;
    weighted_shell += weight * shell;
    if (radius_effective_mode != 0) {
      weighted_radius += weight * CALL_RADIUS_EFFECTIVE(radius_effective_mode, local_values.table);
    }
    BUILD_ROTATION();

#if !defined(USE_GPU)
    // DLL needs to explicitly loop over the q values.
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
    for (q_index=0; q_index<nq; q_index++)
#endif // !USE_GPU
    {

      FETCH_Q();
      APPLY_ROTATION();

      // ======= COMPUTE SCATTERING ==========
#if defined(MAGNETIC) && NUM_MAGNETIC > 0
      // Compute the scattering from the magnetic cross sections.
      double F2 = 0.0;
      const double qsq = qx * qx + qy * qy;
      if (qsq > 1.e-16) {
        // TODO: what is the magnetic scattering at q = 0
        const double px = sin_mspin * cos_mphi;
        const double py = sin_mspin * sin_mphi;
        const double pz = cos_mspin;

        // loop over uu, ud real, du real, dd, ud imag, du imag
        for (unsigned int xs = 0; xs < 6; xs++) {
          const double xs_weight = xs_weights[xs];
          if (xs_weight > 1.e-8) {
            // Since the cross section weight is significant, set the slds
            // to the effective slds for this cross section, call the
            // kernel, and add according to weight.
            for (int sk = 0; sk<NUM_MAGNETIC; sk++) {
              const int32_t mag_index = NUM_PARS + 6 + 3 * sk;
              const int32_t sld_index = slds[sk];
              const double mx = values[mag_index];
              const double my = values[mag_index + 1];
              const double mz = values[mag_index + 2];
              local_values.vector[sld_index] =
                mag_sld(xs, qx, qy, px, py, pz, values[sld_index + 2], mx, my, mz);
              //if (q_index==0) printf("%d: (qx,qy)=(%g,%g) xs=%d sld%d=%g p=(%g,%g) m=(%g,%g,%g)\n",
              //  q_index, qx, qy, xs, sk, local_values.vector[sld_index], px, py, mx, my, mz);
            }
            F2 += xs_weight * CALL_KERNEL();
          }
        }
      }
#else  // !MAGNETIC
      #if defined(CALL_FQ)
        CALL_KERNEL(); // sets F1 and F2 by reference
      #else
        const double F2 = CALL_KERNEL();
      #endif
#endif // !MAGNETIC
      //printf("q_index:%d %g %g %g %g\n", q_index, F2, weight0);

      // Accumulate the weighted scattering into the per-q totals.
      #if defined(USE_GPU)
        #if defined(CALL_FQ)
          this_F2 += weight * F2;
          this_F1 += weight * F1;
        #else
          this_F2 += weight * F2;
        #endif
      #else // !USE_OPENCL
        #if defined(CALL_FQ)
          result[2*q_index+0] += weight * F2;
          result[2*q_index+1] += weight * F1;
        #else
          result[q_index] += weight * F2;
        #endif
      #endif // !USE_OPENCL
    }
  }
}
// close nested loops
++step;
#if MAX_PD>0
  PD_CLOSE(0)
#endif
#if MAX_PD>1
  PD_CLOSE(1)
#endif
#if MAX_PD>2
  PD_CLOSE(2)
#endif
#if MAX_PD>3
  PD_CLOSE(3)
#endif
#if MAX_PD>4
  PD_CLOSE(4)
#endif

// Remember the results and the updated norm.
#if defined(USE_GPU)
  #if defined(CALL_FQ)
  result[2*q_index+0] = this_F2;
  result[2*q_index+1] = this_F1;
  #else
  result[q_index] = this_F2;
  #endif
  if (q_index == 0)
#endif
  {
#if defined(CALL_FQ)
    result[2*nq] = weight_norm;
    result[2*nq+1] = weighted_form;
    result[2*nq+2] = weighted_shell;
    result[2*nq+3] = weighted_radius;
#else
    result[nq] = weight_norm;
    result[nq+1] = weighted_form;
    result[nq+2] = weighted_shell;
    result[nq+3] = weighted_radius;
#endif
  }

// ** clear the macros in preparation for the next kernel **
#undef PD_INIT
#undef PD_OPEN
#undef PD_CLOSE
#undef FETCH_Q
#undef APPLY_PROJECTION
#undef BUILD_ROTATION
#undef APPLY_ROTATION
#undef CALL_KERNEL
}
vednnConvolutionForward.c
#include "vednnConvolutionForward.h"
#include <stdint.h>

#ifdef VEDNN_USE_OPENMP
#include <omp.h>
extern int __vednn_omp_num_threads ;
#endif

// Dispatch helper: splits the batch across OpenMP threads and calls the
// selected low-level convolution implementation on each thread's slice.
// NOTE(review): the `#pragma omp parallel` region is currently commented out,
// so omp_get_num_threads()/omp_get_thread_num() are called outside a parallel
// region and return 1/0 — the whole batch runs on one thread.  Also note
// <omp.h> is only included when VEDNN_USE_OPENMP is defined, while these
// calls are unconditional; confirm the build always defines it.
static inline vednnError_t
vednnConvolutionForward_wrapper(
    vednnConvForward_t pFunc,
    const vednnTensorParam_t *pParamIn, const void *pDataIn,
    const vednnFilterParam_t *pParamKernel, const void *pDataKernel,
    const vednnConvolutionParam_t *pParamConv,
    const vednnTensorParam_t *pParamOut, void *pDataOut )
{
//#ifdef VEDNN_USE_OPENMP
//  if ( __vednn_omp_num_threads == 1 ) {
//    return pFunc(pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut) ;
//  }
//  else {
    vednnError_t rc = VEDNN_SUCCESS ;
//#pragma omp parallel reduction(|:rc)
//    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;

      // Split allBatch as evenly as possible: the first `remain` threads
      // get nBatch+1 items, the rest get nBatch.
      int64_t allBatch = pParamIn->batch ;
      int64_t nBatch = allBatch / nthreads ;
      int64_t remain = allBatch % nthreads ;

      int64_t batchBegin = nBatch * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myBatch = nBatch + ( threadid < remain ? 1 : 0 ) ;

      if( myBatch == 0 ) {
        rc |= VEDNN_SUCCESS ;
      }
      else {
        // Local copies of the tensor params with the batch size reduced to
        // this thread's slice; data pointers offset to the slice start.
        // NOTE(review): assumes float32 data and dense NCHW layout — confirm.
        vednnTensorParam_t _pParamIn = *pParamIn ;
        _pParamIn.batch = myBatch ;
        vednnTensorParam_t _pParamOut = *pParamOut ;
        _pParamOut.batch = myBatch ;
        float* _pDataIn = ((float *)pDataIn) + batchBegin * pParamIn->channel * pParamIn->height * pParamIn->width ;
        float* _pDataOut = ((float *)pDataOut) + batchBegin * pParamOut->channel * pParamOut->height * pParamOut->width ;

        rc |= pFunc(&_pParamIn, (void*)_pDataIn, pParamKernel, pDataKernel, pParamConv, &_pParamOut, (void*) _pDataOut) ;
      }
//    }
    return rc ;
//  }
//#else
//  return pFunc(pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut) ;
//#endif
}

/* ----------------------------------------------------------------------- */

// Public entry point: picks the most specialized direct-convolution kernel
// that matches the geometry (stride/dilation/padding/kernel size) and
// forwards to the batch-splitting wrapper above.
vednnError_t vednnConvolutionForward(
    const vednnTensorParam_t *pParamIn, const void *pDataIn,
    const vednnFilterParam_t *pParamKernel, const void *pDataKernel,
    const vednnTensorParam_t *pParamOut, void *pDataOut,
    const vednnConvolutionParam_t *pParamConv,
    vednnConvolutionAlgorithm_t algo )
{
  if (algo == VEDNN_CONV_ALGORITHM_DIRECT)
  {
    // [todo] add variations
    // Tiny output planes: vectorize over channels instead of width.
    if ( pParamOut->height * pParamOut->width <= 16 ) {
      return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_vecC,
          pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut);
    }
    // "same"-shaped convolution: stride 1, dilation 1, output size == input size.
    else if (pParamConv->strideHeight == 1 && pParamConv->strideWidth == 1
        && pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
        && pParamIn->height == pParamOut->height && pParamIn->width == pParamOut->width )
    {
      if (pParamKernel->width == 1 && pParamKernel->height == 1) {
        return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_pad0_ker1,
            pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut);
      }
      else if (pParamKernel->height == 3 && pParamKernel->width == 3) {
        if (pParamIn->channel == pParamConv->group) // aka inputChannelGroup==1
        {
          if (pParamOut->width <= 128) {
            return
vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame_ker3_c1_owU128,
                pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
          }
          else {
            return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame_ker3_c1,
                pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
          }
        }
        // Kernel variant specialized for channel counts that are multiples of 1024.
        else if (pParamKernel->inChannel % 1024 == 0) {
          return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame_ker3_c1024x,
              pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
        }
        else {
          return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame_ker3,
              pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
        }
      }
      else if (pParamKernel->height == 5 && pParamKernel->width == 5) {
        // owU128 variants are used whenever the output width fits in 128.
        if( pParamOut->width <= 128 ) {
          return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame_ker5_owU128,
              pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
        }
        else {
          return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame_ker5,
              pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
        }
      }
      else if (pParamKernel->height == 2 && pParamKernel->width == 2) {
        return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame_ker2,
            pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
      }
      else {
        return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_padsame,
            pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
      }
    }
    // "valid" convolution: no padding, dilation 1, and output size matches
    // the standard (in - kernel)/stride + 1 formula.
    else if ( pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
        && pParamConv->padHeight == 0 && pParamConv->padWidth == 0
        && pParamOut->height == (pParamIn->height - pParamKernel->height) / pParamConv->strideHeight + 1
        && pParamOut->width == (pParamIn->width - pParamKernel->width) / pParamConv->strideWidth + 1 )
    {
      if (pParamConv->strideHeight == 1 && pParamConv->strideWidth == 1 ) {
        // Fast path: 3x3 kernel, even input/output widths (<=256) and
        // 8-byte-aligned buffers allow the packed 2-wide vector kernel.
        if ( pParamKernel->height == 3 && pParamKernel->width == 3
            && (pParamIn->width <= 256)
            && (pParamIn->width & 0x1) == 0 && (((uint64_t)pDataIn) & 0x7) == 0
            && (pParamOut->width & 0x1) == 0 && (((uint64_t)pDataOut) & 0x7) == 0 )
        {
          return vednnConvolutionForward_wrapper ( vednnConvolutionForward_direct_dil1_str1_pad0_ker3_iw2XU256_ow2X_ioaligned,
              pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut ) ;
        }
        else if (pParamOut->width <= 128) {
          return vednnConvolutionForward_wrapper ( vednnConvolutionForward_direct_dil1_str1_pad0_owU128,
              pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
        }
        else {
          return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_str1_pad0,
              pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
        }
      }
      else {
        // Strided, unpadded convolutions.
        if( pParamKernel->width == 1 && pParamKernel->height == 1 ) {
          if (pParamOut->width <= 128) {
            return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_pad0_owU128_ker1,
                pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
          }
          else {
            return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_pad0_ker1,
                pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
          }
        }
        else {
          if (pParamOut->width <= 128) {
            return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_pad0_owU128,
                pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
          }
          else {
            return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_dil1_pad0,
                pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
          }
        }
      }
    }
    else {
      // General geometry: fall back to the default direct kernels.
      if (pParamOut->width <= 128) {
        return vednnConvolutionForward_wrapper ( vednnConvolutionForward_direct_owU128,
            pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
      }
      else {
        return vednnConvolutionForward_wrapper( vednnConvolutionForward_direct_default,
            pParamIn, pDataIn, pParamKernel, pDataKernel, pParamConv, pParamOut, pDataOut );
      }
    }
  }
  else {
    return VEDNN_ERROR_INVALID_PARAM ;
  }
}
test_nvector_openmpdev.c
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2020, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the OpenMP 4.5 NVECTOR * module implementation. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <sundials/sundials_types.h> #include <nvector/nvector_openmpdev.h> #include <sundials/sundials_math.h> #include "test_nvector.h" #include <omp.h> /* OpenMPDEV vector specific tests */ int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid); /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { int fails = 0; /* counter for test failures */ int retval; /* function return value */ sunindextype length; /* vector length */ N_Vector U, V, W, X, Y, Z; /* test vectors */ int print_timing; /* turn timing on/off */ /* check input and set vector length */ if (argc < 3){ printf("ERROR: TWO (2) Inputs required: vector length and print timing \n"); return(-1); } length = (sunindextype) atol(argv[1]); if (length <= 0) { printf("ERROR: length of vector must be a positive integer \n"); return(-1); } print_timing = atoi(argv[2]); SetTiming(print_timing, 0); printf("Testing the OpenMP DEV N_Vector \n"); printf("Vector length %ld \n", (long int) length); printf("\n omp_get_default_device = %d \n", omp_get_default_device()); printf("\n omp_get_num_devices = %d \n", omp_get_num_devices()); printf("\n 
omp_get_initial_device = %d \n", omp_get_initial_device()); printf("\n omp_is_initial_device = %d \n", omp_is_initial_device()); /* Create new vectors */ W = N_VNewEmpty_OpenMPDEV(length); if (W == NULL) { printf("FAIL: Unable to create a new empty vector \n\n"); return(1); } X = N_VNew_OpenMPDEV(length); if (X == NULL) { N_VDestroy(W); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* Check vector ID */ fails += Test_N_VGetVectorID(X, SUNDIALS_NVEC_OPENMPDEV, 0); /* Check vector length */ fails += Test_N_VGetLength(X, 0); /* Check vector communicator */ fails += Test_N_VGetCommunicator(X, NULL, 0); /* Test clone functions */ fails += Test_N_VCloneEmpty(X, 0); fails += Test_N_VClone(X, length, 0); fails += Test_N_VCloneEmptyVectorArray(5, X, 0); fails += Test_N_VCloneVectorArray(5, X, length, 0); /* Clone additional vectors for testing */ Y = N_VClone(X); if (Y == NULL) { N_VDestroy(W); N_VDestroy(X); printf("FAIL: Unable to create a new vector \n\n"); return(1); } Z = N_VClone(X); if (Z == NULL) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* Standard vector operation tests */ printf("\nTesting standard vector operations:\n\n"); fails += Test_N_VConst(X, length, 0); fails += Test_N_VLinearSum(X, Y, Z, length, 0); fails += Test_N_VProd(X, Y, Z, length, 0); fails += Test_N_VDiv(X, Y, Z, length, 0); fails += Test_N_VScale(X, Z, length, 0); fails += Test_N_VAbs(X, Z, length, 0); fails += Test_N_VInv(X, Z, length, 0); fails += Test_N_VAddConst(X, Z, length, 0); fails += Test_N_VDotProd(X, Y, length, 0); fails += Test_N_VMaxNorm(X, length, 0); fails += Test_N_VWrmsNorm(X, Y, length, 0); fails += Test_N_VWrmsNormMask(X, Y, Z, length, 0); fails += Test_N_VMin(X, length, 0); fails += Test_N_VWL2Norm(X, Y, length, 0); fails += Test_N_VL1Norm(X, length, 0); fails += Test_N_VCompare(X, Z, length, 0); fails += Test_N_VInvTest(X, Z, length, 0); fails += Test_N_VConstrMask(X, Y, Z, length, 0); 
fails += Test_N_VMinQuotient(X, Y, length, 0); /* Fused and vector array operations tests (disabled) */ printf("\nTesting fused and vector array operations (disabled):\n\n"); /* create vector and disable all fused and vector array operations */ U = N_VNew_OpenMPDEV(length); retval = N_VEnableFusedOps_OpenMPDEV(U, SUNFALSE); if (U == NULL || retval != 0) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(U, length, 0); fails += Test_N_VScaleAddMulti(U, length, 0); fails += Test_N_VDotProdMulti(U, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(U, length, 0); fails += Test_N_VScaleVectorArray(U, length, 0); fails += Test_N_VConstVectorArray(U, length, 0); fails += Test_N_VWrmsNormVectorArray(U, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(U, length, 0); fails += Test_N_VScaleAddMultiVectorArray(U, length, 0); fails += Test_N_VLinearCombinationVectorArray(U, length, 0); /* Fused and vector array operations tests (enabled) */ printf("\nTesting fused and vector array operations (enabled):\n\n"); /* create vector and enable all fused and vector array operations */ V = N_VNew_OpenMPDEV(length); retval = N_VEnableFusedOps_OpenMPDEV(V, SUNTRUE); if (V == NULL || retval != 0) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(V, length, 0); fails += Test_N_VScaleAddMulti(V, length, 0); fails += Test_N_VDotProdMulti(V, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(V, length, 0); fails += Test_N_VScaleVectorArray(V, length, 0); fails += Test_N_VConstVectorArray(V, length, 0); fails += Test_N_VWrmsNormVectorArray(V, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(V, length, 0); fails += Test_N_VScaleAddMultiVectorArray(V, 
length, 0); fails += Test_N_VLinearCombinationVectorArray(V, length, 0); /* local reduction operations */ printf("\nTesting local reduction operations:\n\n"); fails += Test_N_VDotProdLocal(X, Y, length, 0); fails += Test_N_VMaxNormLocal(X, length, 0); fails += Test_N_VMinLocal(X, length, 0); fails += Test_N_VL1NormLocal(X, length, 0); fails += Test_N_VWSqrSumLocal(X, Y, length, 0); fails += Test_N_VWSqrSumMaskLocal(X, Y, Z, length, 0); fails += Test_N_VInvTestLocal(X, Z, length, 0); fails += Test_N_VConstrMaskLocal(X, Y, Z, length, 0); fails += Test_N_VMinQuotientLocal(X, Y, length, 0); /* Free vectors */ N_VDestroy(U); N_VDestroy(V); N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); /* Print result */ if (fails) { printf("FAIL: NVector module failed %i tests \n\n", fails); } else { printf("SUCCESS: NVector module passed all tests \n\n"); } return(fails); } /* ---------------------------------------------------------------------- * OpenMPDEV specific tests * --------------------------------------------------------------------*/ /* -------------------------------------------------------------------- * Test for the CUDA N_Vector N_VMake_OpenMPDEV function. Requires N_VConst * to check data. 
*/ int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid) { int failure = 0; realtype *h_data, *d_data; N_Vector Y; N_VConst(NEG_HALF, X); N_VCopyFromDevice_OpenMPDEV(X); h_data = N_VGetHostArrayPointer_OpenMPDEV(X); d_data = N_VGetDeviceArrayPointer_OpenMPDEV(X); /* Case 1: h_data and d_data are not null */ Y = N_VMake_OpenMPDEV(length, h_data, d_data); if (Y == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector is NULL \n \n"); return(1); } if (N_VGetHostArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector host data == NULL \n \n"); N_VDestroy(Y); return(1); } if (N_VGetDeviceArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector device data -= NULL \n \n"); N_VDestroy(Y); return(1); } failure += check_ans(NEG_HALF, Y, length); if (failure) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 1, Proc %d \n", myid); printf(" Failed N_VConst check \n \n"); N_VDestroy(Y); return(1); } if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 1 \n"); } N_VDestroy(Y); /* Case 2: data is null */ Y = N_VMake_OpenMPDEV(length, NULL, NULL); if (Y != NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 2, Proc %d \n", myid); printf(" Vector is not NULL \n \n"); return(1); } if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 2 \n"); } N_VDestroy(Y); return(failure); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector X, sunindextype local_length) { int failure = 0; sunindextype i; realtype *Xdata; N_VCopyFromDevice_OpenMPDEV(X); Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); /* check vector data */ for (i = 0; i < local_length; i++) { failure += FNEQ(Xdata[i], ans); } return (failure 
> ZERO) ? (1) : (0); } booleantype has_data(N_Vector X) { realtype *Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); if (Xdata == NULL) return SUNFALSE; else return SUNTRUE; } void set_element(N_Vector X, sunindextype i, realtype val) { set_element_range(X, i, i, val); } void set_element_range(N_Vector X, sunindextype is, sunindextype ie, realtype val) { realtype *xdev; int dev; sunindextype i; xdev = N_VGetDeviceArrayPointer_OpenMPDEV(X); dev = omp_get_default_device(); /* set elements [is,ie] of the data array */ #pragma omp target map(to:is,ie,val) is_device_ptr(xdev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) { for(i = is; i <= ie; i++) xdev[i] = val; } } realtype get_element(N_Vector X, sunindextype i) { realtype *data; N_VCopyFromDevice_OpenMPDEV(X); data = N_VGetHostArrayPointer_OpenMPDEV(X); return data[i]; } double max_time(N_Vector X, double time) { /* not running in parallel, just return input time */ return(time); } void sync_device() { /* not running on DEV, just return */ return; }
GB_binop__pow_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_fp32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_fp32)
// C=scalar+B                       GB (_bind1st__pow_fp32)
// C=scalar+B'                      GB (_bind1st_tran__pow_fp32)
// C=A+scalar                       GB (_bind2nd__pow_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__pow_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = GB_powf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_powf (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_FP32 || GxB_NO_POW_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// disabled for this operator: pow has no dense ewise3-accum kernel
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pow_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled for this operator: pow has no colscale kernel
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled for this operator: pow has no rowscale kernel
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = GB_powf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = GB_powf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = GB_powf (x, aij) ;                \
}

GrB_Info GB (_bind1st_tran__pow_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = GB_powf (aij, y) ;                \
}

GrB_Info GB (_bind2nd_tran__pow_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_taskloop_grainsize.c
// RUN: %libomp-compile-and-run // RUN: %libomp-compile && env KMP_TASKLOOP_MIN_TASKS=1 %libomp-run // UNSUPPORTED: gcc // We do not yet have the GOMP interface for taskloop /* * Test for taskloop * Method: caculate how many times the iteration space is dispatched * and judge if each dispatch has the requested grainsize * It is possible for two adjacent chunks are executed by the same thread */ #include <stdio.h> #include <omp.h> #include <stdlib.h> #include "omp_testsuite.h" #define CFDMAX_SIZE 1120 int test_omp_taskloop_grainsize() { int i, grainsize, count, tmp_count, result, num_off; int *tmp, *tids, *tidsArray; tidsArray = (int *)malloc(sizeof(int) * CFDMAX_SIZE); tids = tidsArray; for (grainsize = 1; grainsize < 48; ++grainsize) { fprintf(stderr, "Grainsize %d\n", grainsize); count = tmp_count = num_off = 0; for (i = 0; i < CFDMAX_SIZE; ++i) { tids[i] = -1; } #pragma omp parallel shared(tids) { #pragma omp master #pragma omp taskloop grainsize(grainsize) for (i = 0; i < CFDMAX_SIZE; i++) { tids[i] = omp_get_thread_num(); } } for (i = 0; i < CFDMAX_SIZE; ++i) { if (tids[i] == -1) { fprintf(stderr, " Iteration %d not touched!\n", i); result++; } } for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tids[i] != tids[i + 1]) { count++; } } tmp = (int *)malloc(sizeof(int) * (count + 1)); tmp[0] = 1; for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tmp_count > count) { printf("--------------------\nTestinternal Error: List too " "small!!!\n--------------------\n"); break; } if (tids[i] != tids[i + 1]) { tmp_count++; tmp[tmp_count] = 1; } else { tmp[tmp_count]++; } } // is grainsize statement working? 
int num_tasks = CFDMAX_SIZE / grainsize; int multiple1 = CFDMAX_SIZE / num_tasks; int multiple2 = CFDMAX_SIZE / num_tasks + 1; for (i = 0; i < count; i++) { // it is possible for 2 adjacent chunks assigned to a same thread if (tmp[i] % multiple1 != 0 && tmp[i] % multiple2 != 0) { num_off++; } } if (num_off > 1) { fprintf(stderr, " The number of bad chunks is %d\n", num_off); result++; } else { fprintf(stderr, " Everything ok\n"); } free(tmp); } free(tidsArray); return (result==0); } int main() { int i; int num_failed=0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_taskloop_grainsize()) { num_failed++; } } return num_failed; }
ocasiintf.h
//define PARALLEL 0 no loop with parallelization //define PARALLEL 1 declared loops with parallelization #define PARALLEL 1 #define MAXEL 41 #define MAXPH 501 #define PHFIXED 2 #define PHENTERED 0 #define PHSUS -3 #define GRID 0 #define NOGRID -1 #define TCtoTK 273.15 #define TAB "\t" #include "octqc.h" #include <string> #include <stdlib.h> #include <math.h> #include <iostream> #include <cstring> #include <vector> #include <sstream> #include <omp.h> #include <string> #include <fstream> #include <ctime> #include <algorithm> #include<iomanip> #include <fstream> extern"C" { void c_Change_Status_Phase(char *, int ,double ,void *); void c_tqgetv(char *, int , int , int *, double *, void *); // get equilibrium results using state variables void c_tqsetc(char *, int, int , double, int *, void *); // set condition void c_tqce(char *, int , int , double *, void *); // calculate quilibrium with possible target void c_tqini(int, void *); // initiates the OC package void c_tqrfil(char *, void *); // read all elements from a TDB file //void c_tqgcom(int *, char[MAXEL][24], void **); // get system component names. 
At present the elements void c_tqrpfil(char *, int, char **, void *); // read TDB file with selection of elements //void c_tqgnp(int *, void **); // get total number of phases and composition sets void c_tqgpi(int *, char *, void *); // get index of phase phasename void c_tqgpn(int, char *, void *); // get name of phase+compset tuple with index phcsx //void c_tqgnp(int, gtp_equilibrium_data **); // get total number of phases and composition sets void examine_gtp_equilibrium_data(void *); // //void c_getG(int, void *); //void c_calcg(int, int, int, int, void *); void c_tqgphc1(int, int * , int *, int *, double *, double *, double *, void *); void c_tqsphc1(int, double *, double *, void *); void c_tqcph1(int, int, int *, double *, double *, double *, double *, double *, void *); void c_List_Conditions(void *); void c_checktdb(char *); void c_newEquilibrium(char *,int *); void c_selecteq(int ,void *); void c_copy_equilibrium(void *,char *,void *); void c_set_status_globaldata(); int c_errors_number(); void c_new_gtp(); void c_reset_conditions(char *,void *); } extern"C" int c_ntup; // extern"C" int c_nel; // number of elements extern"C" int c_maxc; // extern"C" char *c_cnam[MAXEL]; // character array with all element names extern"C" double c_gval[24]; extern"C" int c_noofcs(int); extern"C" double c_mass[24]; using namespace std; const double R=8.31451; template < typename CHAR_TYPE, typename TRAITS_TYPE = std::char_traits<CHAR_TYPE> > struct basic_teebuf : public std::basic_streambuf< CHAR_TYPE, TRAITS_TYPE > { typedef std::basic_streambuf< CHAR_TYPE, TRAITS_TYPE > streambuf_type ; typedef typename TRAITS_TYPE::int_type int_type ; basic_teebuf( streambuf_type* buff_a, streambuf_type* buff_b ) : first(buff_a), second(buff_b) {} protected: virtual int_type overflow( int_type c ) { const int_type eof = TRAITS_TYPE::eof() ; if( TRAITS_TYPE::eq_int_type( c, eof ) ) return TRAITS_TYPE::not_eof(c) ; else { const CHAR_TYPE ch = TRAITS_TYPE::to_char_type(c) ; if( 
TRAITS_TYPE::eq_int_type( first->sputc(ch), eof ) || TRAITS_TYPE::eq_int_type( second->sputc(ch), eof ) ) return eof ; else return c ; } } virtual int sync() { return !first->pubsync() && !second->pubsync() ? 0 : -1 ; } private: streambuf_type* first ; streambuf_type* second ; }; template < typename CHAR_TYPE, typename TRAITS_TYPE = std::char_traits<CHAR_TYPE> > struct basic_teestream : public std::basic_ostream< CHAR_TYPE, TRAITS_TYPE > { typedef std::basic_ostream< CHAR_TYPE, TRAITS_TYPE > stream_type ; typedef basic_teebuf< CHAR_TYPE, TRAITS_TYPE > streambuff_type ; basic_teestream( stream_type& first, stream_type& second ) : stream_type( &stmbuf), stmbuf( first.rdbuf(), second.rdbuf() ) {} ~basic_teestream() { stmbuf.pubsync() ; } private: streambuff_type stmbuf ; }; typedef basic_teebuf<char> teebuf ; typedef basic_teestream<char> teestream ; std::ofstream logfile( "oc_log.txt" ) ; teestream sout( logfile, std::cout ) ; void Get_Ceq(const int &iceq,void *ceq){ c_selecteq(iceq,ceq); //sout << "-> Adress of ceq-Storage: [" << ceq << "]" <<endl; } void Initialize(void *ceq) { int n = 0;//0 //=============== c_tqini(n, ceq); //=============== //sout << "-> Adress of ceq-Storage: [" << ceq << "]" <<endl; }; int Create_New_Ceq_and_Return_ID(const string &Ceq_Name){ int ieq; char *buffer=(char*)malloc(Ceq_Name.length()+1); char *filename = strcpy(buffer , Ceq_Name.c_str()); c_newEquilibrium(filename,&ieq); free (buffer); return ieq; } void Get_Ceq_pointer(const int &ieq, void *ceq){ c_selecteq(ieq,&ceq); } void GetAllElementsFromDatabase(string tdbfilename){ char *buffer=(char*)malloc(tdbfilename.length()+1); char *filename = strcpy(buffer , tdbfilename.c_str()); c_checktdb(filename); free (buffer); } void ReadDatabase(string tdbfilename, void *ceq) { char *buffer=(char*)malloc(tdbfilename.length()+1); char *filename = strcpy(buffer, tdbfilename.c_str()); //====================== c_tqrfil(filename, ceq); //====================== free (buffer); /*sout << "-> Element 
Data: ["; for(int i = 0; i < c_nel; i++) { sout << c_cnam[i]; if(i < c_nel-1) { sout << ", "; } } sout << "]" << " [" << &ceq << "]" <<endl; */ }; void ReadDatabaseLimited(string &tdbfilename, vector<string> &elnames, void *ceq) { char *buffer=(char*)malloc(tdbfilename.length()+1); char *filename = strcpy(buffer, tdbfilename.c_str()); char *selel[elnames.size()]; for(size_t i = 0; i < elnames.size(); i++) { char *buffer=(char*)malloc(elnames[i].length()+1); char *tempchar = strcpy(buffer, elnames[i].c_str()); selel[i] = tempchar; } //============================================== c_tqrpfil(filename, elnames.size(), selel, ceq); //============================================== /* sout << "-> Element Data: ["; for(int i = 0; i < c_nel; i++) { sout << c_cnam[i]; if(i < c_nel-1) { sout << ", "; } } sout << "]" << " [" << &ceq << "]" << endl; */ free (buffer); }; void ReadPhases(vector<string> &phnames, void *ceq) { phnames.clear(); phnames.resize(c_ntup); for(int i = 1; i < c_ntup+1; i++) { char phn[24]; //========================== c_tqgpn(i, phn, ceq); //========================== int index; c_tqgpi(&index,phn,ceq); string myname(phn); transform(myname.begin(), myname.end(), myname.begin(), ::toupper);// to have it in CAPITAL LETTERS phnames[index-1]=myname; } /* sout << "-> Phase Data: ["; for(size_t i = 0; i < phnames.size(); i++) { sout << i<< " "<<phnames[i]; if(i < phnames.size()-1) { sout << ", "; } } sout << "]" << " [" << &ceq << "]" << endl; */ }; void ResetTemperature(void *ceq){ string mystring("T=none"); char *buffer=(char*)malloc(mystring.length()+1); char *conditions = strcpy(buffer, mystring.c_str()); c_reset_conditions(conditions,ceq); free (buffer); } void ResetAllConditionsButPandN(void *ceq, const vector<string> &el_reduced_names,const int &i_ref, const string &compo_unit){ { string mystring("T=none"); char *buffer=(char*)malloc(mystring.length()+1); char *conditions = strcpy(buffer, mystring.c_str()); c_reset_conditions(conditions,ceq); free 
(buffer); } string mystring=""; for (int i=0;i<el_reduced_names.size();i++){ if (not (i==i_ref)) { mystring=compo_unit; mystring=mystring+"("+el_reduced_names[i]+")=none"; char *buffer=(char*)malloc(mystring.length()+1); char *conditions = strcpy(buffer, mystring.c_str()); c_reset_conditions(conditions,ceq); free (buffer); } } } void Change_Phase_Status(const string &name,int nystat,double val,void *ceq){ //nystat=0 :Entered //nystat=2 :Fixed char *buffer=(char*)malloc(name.length()+1); char *phasename = strcpy(buffer, name.c_str()); c_Change_Status_Phase(phasename,nystat,val,ceq); free (buffer); } void SetTemperature(const double &T, void *ceq) { int cnum; int n1 = 0; int n2 = 0; char par[60] = "T"; // if (T < 1.0) T = 1.0; //========================================= c_tqsetc(par, n1, n2, T, &cnum, ceq); //========================================= // sout << "-> Set Temperature to: [" << T << "]" << " [" << &ceq << "]" << // endl; }; void SetPressure(const double &P, void *ceq) { int cnum; int n1 = 0; int n2 = 0; char par[60] = "P"; // if (P < 1.0) P = 1.0; //========================================= c_tqsetc(par, n1, n2, P, &cnum, ceq); //========================================= // sout << "-> Set Pressure to: [" << P << "]" << " [" << &ceq << "]" << // endl; }; void SetMoles(const double &N, void *ceq) { int cnum; int n1 = 0; int n2 = 0; char par[60] = "N"; //========================================= c_tqsetc(par, n1, n2, N, &cnum, ceq); //========================================= // sout << "-> Set Moles to: [" << N << "]" << " [" << &ceq << "]" << // endl; }; void SetComposition(vector<double>& X, void *ceq, const int &i_ref,string &compo_unit) { int cnum; int n2 = 0; char par[60]; strcpy(par,compo_unit.c_str()); for (int i = 0; i < c_nel; i++) { if (X[i] < 1.0e-8) X[i] = 1.0e-8; // Check and fix, if composition is below treshold if(not (i == i_ref)) { int j=i+1; double value= X[i];// Set and print composition, if element 'i' is not the reference/(last) 
element //================================================== c_tqsetc(par, j, n2,value, &cnum, ceq); //================================================== // sout << "-> Set Composition of " << c_cnam[i] << " to: [" << // X[i] << "]" << " [" << &ceq << "]" << // endl; } else { // Print composition, if element 'i' is the reference/(last) element double X_ref = 1; for(size_t j = 0; j < i; j++) { X_ref -= X[j]; } // sout << "-> Set Composition of " << c_cnam[i] << " to: [" << // X_ref << "]" << " [" << &ceq << "]" << // endl; } } }; void SetConstituents(int phidx, vector<double> y, void *ceq) { int stable1 = phidx; double extra[MAXPH]; double yfr[y.size()]; for(size_t i = 0; i < y.size(); i++) { yfr[i] = y[i]; } //=============================== c_tqsphc1(stable1,yfr,extra,ceq); //=============================== sout << "-> Set Constituents to: ["; for(int i = 0; i < y.size(); i++) { sout << i << ": " << yfr[i]; if(i < y.size()-1) { sout << ", "; } } sout << "]" << endl; }; void ReadPhaseFractions(const vector<string> &phnames, vector<double>& phfract, void *ceq) { double npf[MAXPH]; char statevar[60] = "NP"; int n1 = -1;//-1 int n2 = 0; int n3 = MAXPH;//sizeof(npf) / sizeof(npf[0]); //======================================== c_tqgetv(statevar, n1, n2, &n3, npf, ceq); //======================================== for(int i = 0; i < phnames.size(); i++){ /* char phn[24]; c_tqgpn(i+1, phn, ceq); size_t index=0; for (size_t j=0;j<phnames.size();j++){ if (phnames[j]==phn){ index=j; break; } } */ phfract[i]=npf[i]; //phfract[index]=npf[i]; //cout<<i<<" " <<phnames[i]<<" : "<< phfract[i] <<endl; } }; void GetGibbsData(int phidx, void *ceq) { int n2 = 2; int n3; double gtp[6]; double dgdy[100]; double d2gdydt[100]; double d2gdydp[100]; double d2gdy2[100]; //================================================================= c_tqcph1(phidx, n2, &n3, gtp, dgdy, d2gdydt, d2gdydp, d2gdy2, ceq); //================================================================= sout << "-> Read Gibbs 
Data G: ["; for(int i = 0; i < 6; i++) { sout << gtp[i]; if(i < 5) { sout << ", "; } } sout << "]" << endl; sout << "-> Read Gibbs Data dGdY: ["; for(int i = 0; i < n3; i++) { sout << dgdy[i]; if(i < n3-1) { sout << ", "; } } sout << "]" << endl; sout << "-> Read Gibbs Data d2GdYdT: ["; for(int i = 0; i < n3; i++) { sout << d2gdydt[i]; if(i < n3-1) { sout << ", "; } } sout << "]" << endl; sout << "-> Read Gibbs Data d2GdYdP: ["; for(int i = 0; i < n3; i++) { sout << d2gdydp[i]; if(i < n3-1) { sout << ", "; } } sout << "]" << endl; int kk=n2*(n2+1)/2; sout << "-> Read Gibbs Data d2GdY2: ["; for(int i = 0; i < kk; i++) { sout << d2gdy2[i]; if(i < kk-1) { sout << ", "; } } sout << "]" << endl; }; void SelectSinglePhase(int PhIdx, void *ceq) { // }; void List_Conditions(void *ceq){ c_List_Conditions(ceq); } void CalculateEquilibrium(void *ceq, const int &n1, int &i_error, const vector < string > &Suspended_phase_list) { for (int i=0;i<Suspended_phase_list.size();i++) Change_Phase_Status(Suspended_phase_list[i],PHSUS,0.0,ceq); i_error=0; char target[60] = " "; int n2 = 0; double val; int iter=0; do{ if (not (i_error==0)) { sout<<" !!!! 
Equilibrium not converged & trying again iter="<<iter<<endl; // c_List_Conditions(ceq); } iter+=1; //====================================== c_tqce(target, n1, n2, &val, ceq); //====================================== i_error=c_errors_number(); }while((not(i_error==0))and(iter<1)); }; void Safer_CalculateEquilibrium (void *ceq, const int &n1, int &i_error, const vector < string > &Suspended_phase_list , const string &strLIQUID,const string &strSOLIDSOLUTION, const vector<string> &phnames){ CalculateEquilibrium(&ceq,n1,i_error,Suspended_phase_list); if (i_error>0){ sout<<"first convergence issue"<<endl; for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.,&ceq); Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,1.0,&ceq); //Change_Phase_Status(strLIQUID,PHENTERED,0.5,&ceq); CalculateEquilibrium(&ceq,n1,i_error,Suspended_phase_list); if (i_error>0){ for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.,&ceq); //Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,1.0,&ceq); Change_Phase_Status(strLIQUID,PHENTERED,1.0,&ceq); CalculateEquilibrium(&ceq,n1,i_error,Suspended_phase_list); /* for (int j=0;j<phnames.size()and not (i_error==0);j++){ if (not (phnames[j]==strLIQUID) and not (phnames[j]==strSOLIDSOLUTION)){ Change_Phase_Status(phnames[j],PHENTERED,1.0,&ceq); CalculateEquilibrium(&ceq,n1,i_error,Suspended_phase_list); sout<<"trying new equilibrium with "<<phnames[j]<<endl; } } */ if (i_error>0){ sout<<"!!!!!!!! 
convergence issue !!!!!!!!!!!!!!!!!!!!!"<<endl;
        // Fall-back 3: retry once at 2000 K.
        SetTemperature(2000, &ceq);
        CalculateEquilibrium(&ceq,n1,i_error,Suspended_phase_list);
      }
    }
  }
};

// Read the chemical potential MU of every element (1..c_nel, OC indices are
// 1-based) into MU[0..c_nel-1].
void ReadMU(void *ceq, vector < double > &MU) {
  double npf[1];
  char statevar[60] = "MU";
  for (int i = 1; i < c_nel+1; i++) {
    int n1 = i;
    int n2 = 0;
    int n3 = 1;
    //========================================
    c_tqgetv(statevar, n1, n2, &n3, npf, ceq);
    //========================================
    MU[i-1]=npf[0];
  }
};

// Return the current temperature (K) of the equilibrium *ceq.
double ReadTemperature(void *ceq) {
  double npf[1];
  char statevar[60] = "T";
  int n1 = 0;
  int n2 = 0;
  int n3 = 1;
  double TK;
  //========================================
  c_tqgetv(statevar, n1, n2, &n3, npf, ceq);
  //========================================
  TK=npf[0];
  return(TK);
};

// Return the total enthalpy H of the equilibrium *ceq.
double ReadTotalEnthalpy(void *ceq) {
  double npf[1];
  char statevar[60] = "H";
  int n1 = 0;
  int n2 = 0;
  int n3 = 1;
  double H;
  //========================================
  c_tqgetv(statevar, n1, n2, &n3, npf, ceq);
  //========================================
  H=npf[0];
  return(H);
};

// For every stable phase tuple, read its constituent fractions (unit set by
// compo_unit, e.g. "X" mole or "W" mass) into elfract[phase][element].
// The OC tuple index is mapped onto the phnames ordering by name; phfract is
// currently unused except through the commented-out filter below.
void ReadConstituentFractions(const vector<string> &phnames, const vector<double> &phfract, vector< vector<double> > &elfract, void *ceq, const string &compo_unit) {
  double pxf[10*MAXPH];
  for (int i = 1; i < c_ntup+1; i++) {
    char phn[24];
    c_tqgpn(i, phn, ceq);   // name of phase tuple i
    size_t index=0;
    bool index_found=false;
    for (size_t j=0;j<phnames.size() and not (index_found);j++){
      if (phnames[j]==phn){
        index=j;
        index_found=true;
        //sout<<j<<" "<<i-1<<endl;
      }
    }
    if (not index_found){
      sout<<"problem with phase of index="<<i<<endl;
    }
    //if (phfract[index] > 1e-10)
    else{
      char statevar[60] = "X";
      strcpy(statevar,compo_unit.c_str());   // overwrite with the requested unit
      int n2 = -1; //composition of stable phase n2 = -1 means all fractions
      int n4 = sizeof(pxf)/sizeof(pxf[0]);
      //=======================================
      c_tqgetv(statevar, i, n2, &n4, pxf, ceq);
      //=======================================
      for (int k = 0; k < n4; k++) {
        elfract[index][k]=pxf[k];
      }
    }
  }
};

// Pretty-print the sublattice (extended) constituent fractions of the phase
// with (1-based) index phidx.
void ListExtConstituentFractions(int phidx, vector<string> phnames, void 
*ceq) {
  int stable1 = phidx;
  int nlat;              // number of sublattices
  int nlatc[MAXPH];      // constituents per sublattice
  int conlista[MAXPH];
  double yfr[MAXPH];     // site (constituent) fractions
  double sites[MAXPH];   // site ratios
  double extra[MAXPH];   // extra[0]: moles of atoms per formula unit
  //======================================================================
  c_tqgphc1(stable1, &nlat, nlatc, conlista, yfr, sites, extra, ceq);
  //======================================================================
  sout << "-> Extended Constituent Fractions for " << phnames[stable1-1] << " [" << extra[0] << " moles of atoms/formula unit]";
  int consti = 0;
  for(int i = 0; i < nlat; i++) {
    sout << " [";
    for(int j = 0; j < nlatc[i]; j++) {
      sout << "Const. " << consti << ": " << yfr[consti];
      if(j < nlatc[i]-1) { sout << ", "; }
      consti += 1;
    }
    sout << "]_(" << sites[i] << ")";   // site ratio of this sublattice
  }
  sout << endl;
};

// Decimal string of an int (pre-C++11 replacement for std::to_string).
std::string IntToString ( int number )
{
  std::string mystr;
  std::stringstream out;
  out << number;
  mystr = out.str();
  return mystr;
}

// Write the results of a given equilibrium
// el_reduced_names: vector of names elements with non zero composition
// phnames: vector of names phases that can appear for these elements
// phfract: atomic fraction of these phases after equilibrium
// elfract[i][j]: atomic composition of element i in phase j
// ceqh: pointer for the given equilibrium calculation
// mode: 1 write only atomic fractions of phases after equilibrium
// mode: 1 write atomic fractions + compositions of phases after equilibrium
// NOTE(review): the second "mode: 1" above presumably means mode > 1 (the code
// below reads compositions for mode>1 and chemical potentials for mode>2).
void Write_Results_Equilibrium(ofstream& file, const vector<string> &el_reduced_names, const vector<string> &phnames, vector<double> &phfract, vector< vector<double> > &elfract, void *ceqh,const int &mode,const string &compo_unit, vector<double> &MU,const string &temp_unit,const string &myequi){
  //-------------------------------List Results-------------------------------
  ReadPhaseFractions(phnames, phfract, &ceqh); // Read the amount of stable phases
  if (mode >1) ReadConstituentFractions(phnames, phfract, elfract, &ceqh, compo_unit); // Read the composition of each stable phase
  double TC=ReadTemperature(&ceqh);
  if 
(temp_unit=="C") TC-=TCtoTK;   // convert K -> Celsius for display
  sout<<endl;
  sout<<" Equilibrium at: "<<TC<<" C fat%";
  file<<myequi<<".T]"<<TAB;
  file<<TC<<endl;
  file<<myequi<<".Fat%]"<<TAB;
  // Phase fractions (in %) of all stable phases.
  for (size_t i=0; i<phnames.size(); i++){
    if (phfract[i]>0){
      sout<<" "<<phnames[i]<<"="<<phfract[i]*100;
      file<<phnames[i]<<TAB<<phfract[i]*100<<TAB;
    }
  }
  file<<endl;
  sout<<endl;
  sout.precision(6);
  file.precision(6);
  if (mode >2) {
    // Chemical potentials of every element.
    file<<myequi<<".Mu]"<<TAB;
    ReadMU(&ceqh, MU);
    for (size_t j=0; j<el_reduced_names.size();j++){
      sout<<setw(5)<<"MU("<<el_reduced_names[j]<<")= "<<MU[j]<<endl;
      file<<el_reduced_names[j]<<TAB<<MU[j]<<TAB;
    }
    file<<endl;
  }
  if (mode >1) {
    // Per-phase compositions (phases/elements below 1e-10 are skipped).
    file<<myequi<<".Phase_compo.Begin]"<<TAB<<compo_unit<<"%"<<endl;
    for (size_t i=0; i<phnames.size(); i++){
      if (phfract[i]>1e-10){
        sout<<" --------------------------------------- "<<endl;
        sout<<" "<<phnames[i]<<endl;
        sout<<" --------------------------------------- "<<endl;
        for (size_t j=0; j<el_reduced_names.size();j++){
          if (elfract[i][j]>1e-10) sout<<" "<<el_reduced_names[j]<<" = "<<setw(10)<<elfract[i][j]*100<<" ("<<compo_unit<<"%)"<<endl;
          if (elfract[i][j]>1e-10) file<<myequi<<".Phase_compo."<<phnames[i]<<"."<<el_reduced_names[j]<<"]"<<TAB<<elfract[i][j]*100<<endl;
        }
      }
    }
    file<<myequi<<".Phase_compo.End]"<<endl;
  }
}

// ***************************************************************************************************************
// find all the transitions temperatures for a given alloy composition and accuracy
// if you want to run the program with parallelization
// you need to declare bool parallel =true;
// you need to uncomment: #pragma omp parallel for
// if you want to run the program without parallelization PARALLEL is set to 0 (see top of this file)
// if no parallelization the standart equilibrium pointer is used and we do not enter new equilmibria to save timme
// if parallelization (here on 10 equilibria) we need to enter 10 new equilibria
// this is performed with the 3 commands:
// Ceq_Name=root+IntToString(i); in order to have a different name 
// for each equilibrium
// iceq=Create_New_Ceq_and_Return_ID(Ceq_Name); iceq is the index in the equilibrium vector eqlista of OC3
// Store_Equilibria.push_back(iceq); all the indexes are stored in the vector Store_Equilibria
// here you scan the temperature and we create a vector of the different temperatures that will be used in the parallel calculation
//
// Scan [TK_start, TK_start+(nstep-1)*step_TK] and append every temperature at
// which the set of stable phases changes to Transitions (and, on the last
// refinement pass, the corresponding phase mixtures to Phase_transitions_mixture).
// NOTE(review): status_ok is taken BY VALUE, so the "status_ok=false" set on a
// convergence failure below never reaches the caller — confirm and consider
// passing bool&.
void Find_Transitions(const string &strLIQUID,const string &strSOLSOL,const double &TK_start,const int &nstep,const double &step_TK,vector<double> &W, const vector<string> &phnames,vector<double> &Transitions,const vector<string> &el_reduced_names,const bool first_iteration, const bool last_iteration, vector<int> &Store_Equilibria,vector< string > &Phase_transitions_mixture, void *ceq,const double required_accuracy_on_TK, const vector< string > &Suspended_phase_list, bool status_ok){
  int iceq=0;
  vector<double> phfract;
  phfract.resize(phnames.size(),0.);
  vector< vector<double> > elfract; // Array including all equilibrium compositions
  elfract.resize(phnames.size(),vector<double>(el_reduced_names.size(),0.));
  vector<double> TKCE;                 // temperatures to sample
  vector< vector<double> > CeqFract;   // phase fractions per sampled temperature
  TKCE.resize(0);
  CeqFract.resize(0);
  double TK_end=TK_start+(nstep-1)*step_TK;
  double TK=TK_start;
  int nstep_total=nstep;
  if (not first_iteration) nstep_total+=1;   // add one extra point after the first pass
  for (int i=0; i<nstep_total;i++){
    TKCE.push_back(TK);//here you scan the temperature and we create a vector of the different temperatures that will be used in the parallel calculation
    TK+=step_TK;
  }
  CeqFract.resize(TKCE.size(),vector<double>(phnames.size(),0.));
  size_t max_number_of_phase=0;   // NOTE(review): unused
  // the three lines below trigger parallelism for the nex for {....} loop if PARALLEL is not 0
  // sout<<"number of threads detected:"<<omp_get_num_procs()<<endl;
#if PARALLEL>0
#pragma omp parallel for
#endif
  for (int i=0; i<TKCE.size();i++){
    void *ceqi= NULL;
    if ((PARALLEL>0)) {
      c_selecteq(Store_Equilibria[i], &ceqi);// retrieve the pointer with index stored in Store_Equilibria
    }else{
      //ceqi=ceq;// if no parallelization use STANDART EQUILIBRIUM
      c_selecteq(1, &ceqi);
    }
    //for (int k=0;k<phnames.size();k++) Change_Phase_Status(phnames[k],PHENTERED,0.,&ceqi);
    //Change_Phase_Status(strLIQUID,PHENTERED,1.0,&ceqi);//
    //sout<<"T="<<TKCE[i]<<endl;
    SetTemperature(TKCE[i], &ceqi); // set temperature for specific equilibrium
    //List_Conditions(&ceqi);
    int i_error=0;
    //CalculateEquilibrium(&ceqi,NOGRID,i_error,Suspended_phase_list);
    Safer_CalculateEquilibrium (ceqi,NOGRID,i_error,Suspended_phase_list,strLIQUID,strSOLSOL,phnames);
    if (not(i_error==0)){
      sout<<" equilibrium calculation not converged in transition subroutine for the following conditions"<<endl;
      sout<<" TK="<<TKCE[i]<<" "<< ReadTemperature(&ceqi)<<endl;
      sout<<" composition:"<<endl;
      // NOTE(review): this inner i shadows the outer (OpenMP) loop index i.
      for (size_t i=0;i<el_reduced_names.size();i++) {
        sout<<el_reduced_names[i]<<" (w%): "<<W[i]<<endl;
      }
      status_ok=false;   // see NOTE above: lost on return (by-value parameter)
    }
    if (i_error==0){
      ReadPhaseFractions(phnames, phfract, &ceqi);// get the phase fraction of all phases
      for (size_t j=0; j<phnames.size(); j++){
        if (phfract[j]>0) CeqFract[i][j]=phfract[j];
      }
    }
  }
  /* for (int i=0; i<TKCE.size();i++){ sout<<i<<" ["<<TK_start<<","<<TK_end<<"] ---->"<<TKCE[i]<<endl; } */
  // analyse the results of ech equilibrium stored in CeqFract[i][j] is the index of the equilibrium J the index of the phase
  for (int i=0; (i<TKCE.size()-1) and status_ok;i++){
    /*sout<<i<<" ["<<TK_start<<","<<TK_end<<"] ---->"<<TKCE[i]<<endl; for (size_t j=0; j<phnames.size(); j++){ if (CeqFract[i][j]>0) sout<<" "<<phnames[j]; } sout<<endl; */
    for (size_t j=0; j<phnames.size(); j++){
      // Phase j appears or disappears between samples i and i+1 -> transition.
      if ((!(CeqFract[i][j]<1e-8)&&(CeqFract[i+1][j]<1e-8))||(!(CeqFract[i][j]>1e-8)&&(CeqFract[i+1][j]>1e-8))) {
        // a transition has been detected
        // sout<<"********transition at: "<<TKCE[i]<<endl;
        // sout<<"phase:"<<phnames[j]<<" "<<CeqFract[i][j]<<" "<<CeqFract[i+1][j]<<endl;
        int i_value;   // NOTE(review): unused
        if (! 
last_iteration) {
          // Refinement passes: record only the lower bracket temperature.
          Transitions.push_back(TKCE[i]);
        }else {
          // Final pass: also record the phase mixture on each side of the transition.
          if (Transitions.size()==0) {
            Transitions.push_back(TKCE[i]);
            Phase_transitions_mixture.push_back("");
            for (size_t k=0; k<phnames.size(); k++){
              if (CeqFract[i][k]>0) {
                Phase_transitions_mixture.back()+=phnames[k];
                Phase_transitions_mixture.back()+=" + ";
              }
            }
          }
          Transitions.push_back(TKCE[i+1]);
          Phase_transitions_mixture.push_back("");
          bool first_phase=true;
          for (size_t k=0; k<phnames.size(); k++){
            if (CeqFract[i+1][k]>0) {
              if (not first_phase) Phase_transitions_mixture.back()+=" + ";;
              Phase_transitions_mixture.back()+=phnames[k];
              first_phase=false;
            }
          }
        }
        j=phnames.size()+1;// exit the loop
      }
    }
  }
}

// ***************************************************************************************************************
// find all the transitions temperatures for a given alloy composition
// step_TK: first interval of temperature used
// n_step : number of steps set to NSTEP
// between TK_start and TK_end=TK_start+(n_step-1)*step_TK;
// required_accuracy_on_TK: self explanatory
// W : weight composition of elements
// phnames: vector of names phases that can appear for these elements
// el_reduced_names: vector of names elements with non zero composition
// ceq: pointer for the given equilibrium calculation used to pass the standart equilibrium in non parallel computation
// parallelization option is in Find_Transitions
// see Find_Transitions for comments on parallelization
void Global_Find_Transitions(const string &strLIQUID,const string &strSOLSOL, ofstream& file,double &TK_start,const int &n_step,double &TK_end,const double required_accuracy_on_TK, vector<double> &W, const vector<string> &phnames,const vector<string> &el_reduced_names, void *ceq, const int &i_ref, const string &compo_unit, const int &ncpu, vector<int> &Store_Equilibria, vector< string > &Store_Equilibria_compo_unit, const vector< string > &Suspended_phase_list, const string &strcomponb){
  string mycompo_unit=compo_unit;
  double TK_end_ini, 
TK_start_ini;
  TK_start_ini=TK_start;   // remember the original bounds for the final report
  TK_end_ini=TK_end;
  // sout<<"sntep="<<n_step<<endl;
  double step_TK=(TK_end-TK_start)/(double)(n_step-1);
  double old_step_TK=TK_end-TK_start;
  // Number of refinement passes for step_TK to shrink below the required accuracy.
  int number_of_loops=(int)( log10( fabs(step_TK)/required_accuracy_on_TK)/log10(n_step)+1);
  //sout<<"number of loops"<<number_of_loops<<endl;
  vector<double> phfract;
  vector<double> Transitions1;   // transitions found by the current pass
  vector<double> Transitions0;   // transitions carried over from the previous pass
  phfract.resize(phnames.size(),0.);
  string root;
  Transitions1.push_back(TK_start);
  //c_no_ph_creation();
  root= "CEQ_";
  string Ceq_Name=root;
  if (PARALLEL>0) {
    // Create (once) one named OC equilibrium per temperature sample so the
    // parallel loop in Find_Transitions works on independent equilibria.
    for (int i=Store_Equilibria.size(); i<n_step+1;i++){
      Ceq_Name=root+IntToString(i);//in order to have a different name for each equilibrium
      int iceq=Create_New_Ceq_and_Return_ID(Ceq_Name);// iceq is the index in the equilibrium vector eqlista of OC3
      //sout<<Ceq_Name<<" "<<iceq<<endl;
      Store_Equilibria.push_back(iceq);//all the indexes are stored in the vector Store_Equilibria
      string compo_unit("W");
      Store_Equilibria_compo_unit.push_back(compo_unit);
      void *ceqi= NULL;
      c_selecteq(iceq, &ceqi);
      SetPressure(1e5, &ceqi);// Set Pressure when ceqi is created (for the first loop of Global_Find_Transitions)
      SetMoles(1.0, &ceqi); // Set Number of moles when ceqi is created
      SetComposition(W, &ceqi,i_ref,mycompo_unit);// Set the composition when ceqi is created
      // List_Conditions(&ceqi);
      c_set_status_globaldata();
    }
    // Initialise each stored equilibrium: T=1200 K, nominal composition, all
    // phases dormant except the liquid, then one equilibrium to seed the state.
    for (int i=0; i<n_step+1;i++){
      void *ceqi= NULL;
      int iceq=Store_Equilibria[i];
      c_selecteq(iceq, &ceqi);
      // Change_Phase_Status(strLIQUID,PHENTERED,1.0,&ceqi);//
      double TK=1200;
      SetTemperature(TK, &ceqi);
      SetComposition(W, &ceqi,i_ref,mycompo_unit);// Set the composition when ceqi is created //
      //---------------------Compute Equilibrium----------------------------
      int i_error=0;
      for (int k=0;k<phnames.size();k++) Change_Phase_Status(phnames[k],PHENTERED,0.,&ceqi);
      Change_Phase_Status(strLIQUID,PHENTERED,1.0,&ceqi);//
      CalculateEquilibrium(&ceqi,NOGRID,i_error,Suspended_phase_list);
    }
  }
  bool first_iteration=true;
  vector< 
string > Phase_transitions_mixture;
  bool status_ok=true;
  // Successive refinement: each pass rescans around previously found
  // transitions with a temperature step n_step times smaller.
  for (size_t k=0; (k<number_of_loops) and status_ok;k++){
    // sout<<" loop n:"<<k+1<<" increment of T="<< step_TK<<endl;
    if (k>0) first_iteration=false;
    Transitions0.resize(0);
    for (size_t i=0;i<Transitions1.size();i++) {
      Transitions0.push_back(Transitions1[i]);
      // sout<<i<<" "<<Transitions1[i]<<endl;
    }
    Transitions1.resize(0);
    bool last_iteration=false;
    if (k==number_of_loops-1) last_iteration=true;
    for (size_t i=0; (i<Transitions0.size()) and status_ok;i++){
      // sout<<"treating transition : "<<Transitions0[i]<<endl;
      TK_start=Transitions0[i];
      Find_Transitions(strLIQUID,strSOLSOL,TK_start,n_step,step_TK,W,phnames,Transitions1,el_reduced_names,first_iteration,last_iteration,Store_Equilibria,Phase_transitions_mixture, ceq,required_accuracy_on_TK,Suspended_phase_list,status_ok );
    }
    double old_step_TK=step_TK;   // NOTE(review): shadows the outer old_step_TK and is unused
    step_TK=step_TK/n_step;
  }
  // Report the sequence of phase transitions to the output file and to sout.
  file<<"["<<strcomponb<<".Equilibrium_sequence_of_phases.Begin]"<<TAB;
  file<< "["<<TK_end_ini-TCtoTK<<","<<TK_start_ini-TCtoTK<<"] C"<<endl;
  if (status_ok){
    for (size_t i=0;i<Transitions1.size();i++) {
      file<<"["<<strcomponb<<".Equilibrium_sequence_of_phases]"<<TAB<<setw(4)<<i<<TAB<<setw(10)<<Transitions1[i]-TCtoTK<<TAB<<Phase_transitions_mixture[i]<<endl;
    }
  }
  else{
    file<<"["<<strcomponb<<".Equilibrium_sequence_of_phases]"<<TAB<<"FAILED"<<endl;
  }
  file<<"["<<strcomponb<<".Equilibrium_sequence_of_phases.End]"<<endl;
  sout<<"======================================================================"<<endl;
  sout<<" TQ Parallel: ";
  if (PARALLEL==0) {
    sout<<"N0";
  }
  else{
    sout<<"Yes";
    sout<<" / number of threads: "<<ncpu;
  }
  sout<<endl;
  sout<<" Here are the transition temperatures that have been found "<<endl;
  sout<<" in the temperature range ["<<TK_end_ini-TCtoTK<<","<<TK_start_ini-TCtoTK<<"] C"<<endl;
  sout<<" for the following composition: "<<endl;
  /* sout<<endl; for (size_t i=0;i<el_reduced_names.size();i++) { sout<<" "<<el_reduced_names[i]<<" ("<<mycompo_unit<<"): "<<W[i]<<endl; } sout<<" 
-------------------------------------------------------- "<<endl; */
  for (size_t i=0;i<Transitions1.size();i++) {
    sout<<" "<<setw(4)<<i<<" "<<setw(10)<<Transitions1[i]-TCtoTK<<" "<<Phase_transitions_mixture[i]<<endl;
  }
  sout<<endl;
  TK_start=Transitions1[0];   // expose the first transition temperature to the caller
}

//************************************************************************************************************************************************************************
//************************************************************************************************************************************************************************
// Scheil-Gulliver solidification: cool step by step, freeze out the solid
// formed at each step and re-equilibrate the remaining liquid, writing the
// concentration profiles to strGradientFileOut and a summary to file.
void scheil_solidif(const string strGradientFileOut,const string &strLIQUID, const string &strSOLIDSOLUTION,ofstream& file, const vector<string> &el_reduced_names, const vector<string> &phnames, void *ceq,vector<double> &W,const double &target_delta_f_liq, const double &delta_T_min,const double &delta_T_max, double &TK_liquidus,const int &i_ref,const string &compo_unit,const vector<string> &Suspended_phase_list, const string &strcomponb)
{
  vector< vector<double> > elfract;   // per-phase element fractions
  vector<double> phfract_old;         // phase fractions at the previous step
  vector<double> phfract;             // phase fractions at the current step
  vector<double> phfract_cum;         // cumulated solid fractions over the run
  elfract.resize(phnames.size(),vector<double>(el_reduced_names.size(),0.));
  phfract_old.resize(phnames.size(),0.);
  phfract.resize(phnames.size(),0.);
  phfract_cum.resize(phnames.size(),0.);
  vector<double> TransitionsT;        // temperatures where the phase set changed
  vector<double> TransitionsFl;       // remaining liquid fraction at those points
  vector<string> Phase_transitions_mixture;
  string my_compo_unit("X");
  char tab = '\t';
  vector<double> XLiq;       // current liquid composition (mole fractions)
  XLiq.resize(el_reduced_names.size(),0.);
  vector<double> XLiq_ini;   // initial liquid composition
  XLiq_ini.resize(el_reduced_names.size(),0.);
  vector<double> XssCastAt;  // accumulated solid-solution composition
  XssCastAt.resize(el_reduced_names.size(),0.);
  double fsol_cum=0;
  double fLiq=1.0;           // remaining liquid fraction
  double d_T=delta_T_min;    // adaptive temperature step
  int iLiq=0;
  int iSol=0;
  int i_error=0;
  bool phase_found=false;
  // Locate the liquid phase index in phnames.
  for (int i=0;i<phnames.size() and not phase_found;i++){
    if (phnames[i]==strLIQUID){
      phase_found=true;
      iLiq=i;
    }
  }
  if (not phase_found){
    sout<<" problem i was assuming that the name 
of the liquid phase is (according to the input file:"<<strLIQUID<<endl;
    exit(EXIT_FAILURE);
  }
  phase_found=false;
  // Locate the main solid-solution phase index.
  for (int i=0;i<phnames.size() and not phase_found;i++){
    if (phnames[i]==strSOLIDSOLUTION){
      phase_found=true;
      iSol=i;
    }
  }
  if (not phase_found){
    sout<<" problem i was assuming that the name of the Solid Solution is:"<<strSOLIDSOLUTION<<endl;
    exit(EXIT_FAILURE);
  }
  // Start just above the liquidus: fully liquid reference state.
  double TK=TK_liquidus+0.01;
  SetTemperature(TK, &ceq);
  CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list);
  ReadPhaseFractions(phnames, phfract, &ceq); // Read the amount of stable phases
  ReadConstituentFractions(phnames, phfract, elfract, &ceq, "X"); // Read the composition of each stable phase
  for (int i=0;i<el_reduced_names.size();i++){
    XLiq[i]=elfract[iLiq][i];
    XLiq_ini[i]=XLiq[i];
  }
  ResetAllConditionsButPandN(&ceq, el_reduced_names,i_ref, compo_unit);
  TK=TK_liquidus-1*delta_T_min;   // first step below the liquidus
  SetTemperature(TK, &ceq);
  SetComposition(XLiq,&ceq,i_ref,my_compo_unit);
  CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list);
  if (phfract[iLiq]<0.999){
    sout<<"scheil solifification aborted at begining because the initial liquid fraction is too low and equal: "<<phfract[iLiq]<<endl;
    exit(EXIT_FAILURE);
  }
  //************************************************************************************
  // main solidification loop starts here
  //************************************************************************************
  ofstream gradientOut(strGradientFileOut.c_str()) ;
  file<<"["<<strcomponb<<".Scheil_solidification.Concentration_output_file]"<<TAB<<strGradientFileOut<<endl;
  gradientOut<<" total solid composition (at) and then solid solution (wt%) as a function of solidified fraction"<<endl;
  gradientOut<<" "<<setw(15)<<"[TC]"<<tab<<setw(15)<<"[solid atomic fraction]";
  // NOTE(review): "not i==i_ref" parses as "(!i)==i_ref", NOT "i!=i_ref" —
  // confirm the intended column filtering against the reference element i_ref.
  for (int i=0;i<el_reduced_names.size();i++){
    if (not i==i_ref) gradientOut<<tab<<setw(8)<<el_reduced_names[i]<<" (at)";
  }
  for (int i=0;i<el_reduced_names.size();i++){
    if (not i==i_ref) gradientOut<<tab<<setw(8)<<el_reduced_names[i]<<" (wt%)";
  }
  gradientOut<<endl;
  int j_error=0;   // consecutive non-converged steps; abort after 10
  while ((fLiq>5e-4)and(j_error<10)){
    for (int i=0;i<phfract_old.size();i++) phfract_old[i]=phfract[i];
    TK-=d_T;
    SetTemperature(TK, &ceq);
    SetComposition(XLiq,&ceq,i_ref,my_compo_unit);   // the liquid of step n-1 is the alloy of step n
    for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.0,&ceq);
    //Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,0.5,&ceq);//
    Change_Phase_Status(strLIQUID,PHENTERED,1.0,&ceq);//
    /* sout<<"TK= "<<TK<<endl; for (int i=0;i<el_reduced_names.size();i++){ sout<<el_reduced_names[i] <<" = "<<XLiq[i]<<endl;; } */
    /* CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list); if (i_error>0){ for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.0,&ceq); Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,0.5,&ceq);// Change_Phase_Status(strLIQUID,PHENTERED,0.5,&ceq);// CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list); if (i_error>0){ for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.0,&ceq); Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,1.0,&ceq);// CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list); } } */
    Safer_CalculateEquilibrium (ceq,NOGRID,i_error,Suspended_phase_list,strLIQUID,strSOLIDSOLUTION,phnames);
    if (i_error>0){
      sout<<"TK= "<<TK<<endl;
      for (int i=0;i<el_reduced_names.size();i++){
        sout<<el_reduced_names[i] <<" = "<<XLiq[i]<<endl;;
      }
      d_T=delta_T_min;   // shrink the step and count the failure
      j_error+=1;
      sout<<"TK="<<TK<<" Fl="<<fLiq<<" j_error="<<j_error<<endl;
    }
    //
    if (i_error==0){
      j_error=0;
      ReadPhaseFractions(phnames, phfract, &ceq); // Read the amount of stable phases
      ReadConstituentFractions(phnames, phfract, elfract, &ceq, "X"); // Read the composition of each stable phase
      if (phfract[iLiq]<0.99999){   // some solid actually formed at this step
        //sout<<TK-TCtoTK<<" fl= "<<fLiq<<" "<<phfract[iLiq]<<" "<<d_T<<endl;
        gradientOut<<" "<<setw(15)<<TK-TCtoTK<<tab<<setw(15)<<1.0-fLiq;
        double fsolid=phfract[iSol]*fLiq;   // solid-solution increment (global fraction)
        fsol_cum+=fsolid;
        for (int i=0;i<el_reduced_names.size();i++){
          if (fsolid>0) 
XssCastAt[i]+=fsolid*elfract[iSol][i];   // accumulate solid-solution composition
          if (not i==i_ref) {   // NOTE(review): parses as (!i)==i_ref — see note above the loop headers
            // Mean composition of the solid formed at this step (lever rule on the step).
            double value=(XLiq[i]-phfract[iLiq]*elfract[iLiq][i])/(1.0-phfract[iLiq]);
            if (value<1e-8) value=1e-8;   // floor to keep log plots finite
            gradientOut<<tab<<setw(15)<<value;
          }
        }
        for (int i=0;i<el_reduced_names.size();i++){
          XLiq[i]=elfract[iLiq][i];   // Scheil step: new alloy = remaining liquid
        }
        ReadConstituentFractions(phnames, phfract, elfract, &ceq, "W");   // re-read in weight fractions for the wt% columns
        //sout<<TK-TCtoTK<<" fl= "<<fLiq<<" "<<phfract[iLiq]<<" "<<d_T<<endl;
        for (int i=0;i<el_reduced_names.size();i++){
          if (not i==i_ref) {
            if (phfract[iSol]>1e-6){
              gradientOut<<tab<<setw(15)<<elfract[iSol][i]*100.;
            }
          }
        }
        gradientOut<<endl;
        for (size_t j=0; j<phnames.size();j++){
          if ((not (j==iLiq)) and (phfract[j]>0)){
            phfract_cum[j]+=phfract[j]*fLiq;   // cumulate every solid phase
          }
        }
        fLiq*=phfract[iLiq];   // liquid remaining after this step
      }
      // Detect appearance/disappearance of any phase vs the previous step.
      bool transition_detected=false;
      for (size_t j=0; j<phnames.size() and not transition_detected; j++){
        if ((!(phfract_old[j]<1e-8)&&(phfract[j]<1e-8))||(!(phfract_old[j]>1e-8)&&(phfract[j]>1e-8))){
          // a transition has been detected
          // sout<<"********transition at: "<<TKCE[i]<<endl;
          // sout<<"phase:"<<phnames[j]<<" "<<CeqFract[i][j]<<" "<<CeqFract[i+1][j]<<endl;
          TransitionsFl.push_back(fLiq);
          TransitionsT.push_back(TK-TCtoTK);
          transition_detected=true;
          Phase_transitions_mixture.push_back("");
          bool first_phase=true;
          for (size_t k=0; k<phnames.size(); k++){
            if (phfract[k]>0) {
              if (not first_phase) Phase_transitions_mixture.back()+=" + ";;
              Phase_transitions_mixture.back()+=phnames[k];
              first_phase=false;
            }
          }
        }
      }
      // Adapt the step so each step solidifies roughly target_delta_f_liq.
      if (phfract[iLiq]>target_delta_f_liq) {
        d_T*=1.05;
        if (d_T>delta_T_max) d_T=delta_T_max;
      }
      if (phfract[iLiq]<target_delta_f_liq) {
        d_T/=1.15;
        if (d_T<delta_T_min) d_T=delta_T_min;
      }
    }else{
      //exit(EXIT_FAILURE);
    }
  }
  gradientOut.close();
  sout<<"------------------------------------------"<<endl;
  sout<<" starting composition in at : " <<endl;
  for (int i=0;i<el_reduced_names.size();i++){
    sout<<el_reduced_names[i] <<" = "<<XLiq_ini[i]<<" at"<<endl;;
  }
  sout<<"------------------------------------------"<<endl;
  if (fsol_cum>0){
    // Mean composition of the solid solution formed over the whole run.
    sout<<" concentrations left in "<<strSOLIDSOLUTION<<" after Scheil solidification: " <<endl;
    for (int i=0;i<el_reduced_names.size();i++){
      XssCastAt[i]/=fsol_cum;   // normalise to a mean composition
      sout<<el_reduced_names[i] <<" = "<<XssCastAt[i]<<" at"<<endl;;
    }
    sout<<"------------------------------------------"<<endl;
  }
  sout<<" Phases formed after Scheil solidification: " <<endl;
  for (size_t j=0; j<phnames.size();j++){
    if ((not (j==iLiq)) and (phfract_cum[j]>0)){
      sout<<"fat("<<phnames[j]<<")="<<phfract_cum[j]<<endl;
    }
  }
  sout<<"------------------------------------------"<<endl;
  file<<"["<<strcomponb<<".Scheil_solidification.Fat%]"<<TAB;
  for (size_t i=0; i<phnames.size(); i++){
    if ((not (i==iLiq)) and (phfract_cum[i]>0)){
      file<<phnames[i]<<TAB<<phfract_cum[i]*100.<<TAB;
    }
  }
  file<<endl;
  int i=iSol;
  for (int j=0;j<el_reduced_names.size();j++){
    if (elfract[i][j]>1e-10) file<<strcomponb<<".Scheil_solidification.CompoMoyAt%."<<strSOLIDSOLUTION<<"."<<el_reduced_names[j]<<"]"<<TAB<<XssCastAt[j]*100<<endl;
  }
  file<<"["<<strcomponb<<".Scheil_solidification.Sequence_of_phases.Begin]"<<endl;
  sout<<"======================================================================"<<endl;
  sout<<" Here are the transition temperatures that have been found "<<endl;
  sout<<" during a Scheil solidification simulation"<<endl;
  /* sout<<endl; for (size_t i=0;i<el_reduced_names.size();i++) { sout<<" "<<el_reduced_names[i]<<" ("<<compo_unit<<"%): "<<W[i]*100.0<<endl; } sout<<" -------------------------------------------------------- "<<endl; */
  //file <<" "<<setw(4)<<"i"<<tab<<setw(10)<<"TC"<<tab<<setw(10)<<"solid f(at)"<<tab<<"mixture of phase"<<endl;
  sout.precision(6);
  for (size_t i=0;i<TransitionsT.size();i++) {
    sout <<" "<<setw(4)<<i<<" "<<setw(10)<<TransitionsT[i]<<" C FL="<<setw(10)<<TransitionsFl[i]<<" "<<Phase_transitions_mixture[i]<<endl;
    file<<"["<<strcomponb<<".Scheil_solidification.Sequence_of_phases]"<<tab<<setw(4)<<i<<tab<<setw(10)<<TransitionsT[i]<<tab<<setw(10)<<1.0-TransitionsFl[i]<<tab<<Phase_transitions_mixture[i]<<endl;
  }
  sout<<" end of 
solidification: "<<TK-TCtoTK<<endl;
  file<<"["<<strcomponb<<".Scheil_solidification.Sequence_of_phases.End]"<<endl;
  file<<"["<<strcomponb<<".Scheil_solidification.End_temperature]"<<TAB;
  file<<TK-TCtoTK<<endl;
  sout<<endl;
  // Restore a clean, fully liquid state at the nominal composition so the
  // shared equilibrium can be reused by subsequent calculations.
  SetTemperature(1500, &ceq);
  for (size_t i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.0,&ceq);
  Change_Phase_Status(strLIQUID,PHENTERED,1.0,&ceq);//
  ResetAllConditionsButPandN(&ceq, el_reduced_names,i_ref,my_compo_unit);
  my_compo_unit=compo_unit;
  SetComposition(W,&ceq,i_ref,my_compo_unit);
  SetTemperature(1500, &ceq);
  for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.0,&ceq);
  //Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,1.0,&ceq);//
  Change_Phase_Status(strLIQUID,PHENTERED,1.0,&ceq);//
  CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list);
}

// Upper-case mystring in place.
void All_Capital_Letters(string &mystring){
  transform(mystring.begin(), mystring.end(), mystring.begin(), ::toupper);// to have it in CAPITAL LETTERS
}

// Bisection-like search on temperature until the liquid fraction equals
// targeted_fraction (within temperature_accuracy). On success TK holds the
// temperature; on failure i_error=1000 and TK=-1000.
void find_TK_for_a_given_Liquid_fraction(double &TK, int &i_error, const string &strLIQUID,const string &strSOLIDSOLUTION, const double &targeted_fraction, const double &temperature_accuracy, void *ceq, const vector<string> &phnames,const vector<string> &Suspended_phase_list){
  bool phase_found=false;
  int i_LIQ=0;
  vector< double > phfract;
  phfract.resize(phnames.size(),0.);
  TK=0;
  // Locate the liquid phase index in phnames.
  for (int i=0;i<phnames.size() and not phase_found;i++){
    if (phnames[i]==strLIQUID){
      phase_found=true;
      i_LIQ=i;
    }
  }
  if (not phase_found){
    sout<<" problem i was assuming that the name of the liquid phase is (according to the input file:"<<strLIQUID<<endl;
    exit(EXIT_FAILURE);
  }
  double Fl=0.;
  double step_T=20.;     // initial search step (K); sign flips when the target is crossed
  int iter_max=1000;
  SetTemperature(1200., &ceq);
  CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list);
  for (int i=0;i<phnames.size();i++) {
    Change_Phase_Status(phnames[i],PHENTERED,0.,&ceq);
  }
  Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,0.5,&ceq);
  Change_Phase_Status(strLIQUID,PHENTERED,0.5,&ceq);
  double 
valueT=673.15;   // starting guess (400 C)
  int iter=0;
  i_error=0;
  while ((fabs(step_T)>temperature_accuracy)and (iter<=iter_max)){
    valueT+=step_T;
    SetTemperature(valueT, &ceq);
    Safer_CalculateEquilibrium (ceq,NOGRID,i_error,Suspended_phase_list,strLIQUID,strSOLIDSOLUTION,phnames);
    /*CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list); if (i_error>0){ for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.,&ceq); Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,1.0,&ceq); //Change_Phase_Status(strLIQUID,PHENTERED,0.5,&ceq); CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list); } if (i_error>0){ for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.,&ceq); //Change_Phase_Status(strSOLIDSOLUTION,PHENTERED,1.0,&ceq); Change_Phase_Status(strLIQUID,PHENTERED,1,&ceq); CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list); } */
    if (i_error==0){
      ReadPhaseFractions(phnames, phfract, &ceq);
      Fl=phfract[i_LIQ];   // current liquid fraction
    }
    else{
      iter=iter_max+1;   // abort immediately on a failed equilibrium
    }
    // Halve and reverse the step each time the target fraction is crossed.
    if ((Fl>targeted_fraction) and (step_T>0)) step_T=-fabs(step_T)/2.;
    if ((Fl<targeted_fraction) and (step_T<0)) step_T=+fabs(step_T)/2.;
    //sout<<valueT<<" "<<step_T<<" "<<" "<<Fl<<" "<<i_error<<endl;
    iter+=1;
  }
  if (iter>iter_max) i_error=1000;
  if (i_error==0){
    TK=valueT;
  }
  else{
    // Not converged: restore an all-liquid state and flag failure.
    sout<<"not converged"<<endl;
    for (int i=0;i<phnames.size();i++) Change_Phase_Status(phnames[i],PHENTERED,0.,&ceq);
    Change_Phase_Status(strLIQUID,PHENTERED,1,&ceq);
    SetTemperature(1500, &ceq);
    CalculateEquilibrium(&ceq,NOGRID,i_error,Suspended_phase_list);
    TK=-1000;
  }
}
/* ===== parser.c — start of a second, unrelated concatenated file (GCC C++ parser) ===== */
/* C++ Parser.
   Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.
   Written by Mark Mitchell <mark@codesourcery.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "timevar.h"
#include "cpplib.h"
#include "tree.h"
#include "cp-tree.h"
#include "intl.h"
#include "c-family/c-pragma.h"
#include "decl.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "output.h"
#include "target.h"
#include "cgraph.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "plugin.h"
#include "tree-pretty-print.h"
#include "parser.h"


/* The lexer.  */

/* The cp_lexer_* routines mediate between the lexer proper (in libcpp
   and c-lex.c) and the C++ parser.  */

/* A token shared by every "end of file" position; kind CPP_EOF, no
   keyword, zeroed flags and a null value.  */
static cp_token eof_token =
{
  CPP_EOF, RID_MAX, 0, PRAGMA_NONE, false, false, false, 0, { NULL }
};

/* The various kinds of non integral constant we encounter. 
*/
typedef enum non_integral_constant {
  NIC_NONE,
  /* floating-point literal */
  NIC_FLOAT,
  /* %<this%> */
  NIC_THIS,
  /* %<__FUNCTION__%> */
  NIC_FUNC_NAME,
  /* %<__PRETTY_FUNCTION__%> */
  NIC_PRETTY_FUNC,
  /* %<__func__%> */
  NIC_C99_FUNC,
  /* "%<va_arg%> */
  NIC_VA_ARG,
  /* a cast */
  NIC_CAST,
  /* %<typeid%> operator */
  NIC_TYPEID,
  /* non-constant compound literals */
  NIC_NCC,
  /* a function call */
  NIC_FUNC_CALL,
  /* an increment */
  NIC_INC,
  /* an decrement */
  NIC_DEC,
  /* an array reference */
  NIC_ARRAY_REF,
  /* %<->%> */
  NIC_ARROW,
  /* %<.%> */
  NIC_POINT,
  /* the address of a label */
  NIC_ADDR_LABEL,
  /* %<*%> */
  NIC_STAR,
  /* %<&%> */
  NIC_ADDR,
  /* %<++%> */
  NIC_PREINCREMENT,
  /* %<--%> */
  NIC_PREDECREMENT,
  /* %<new%> */
  NIC_NEW,
  /* %<delete%> */
  NIC_DEL,
  /* calls to overloaded operators */
  NIC_OVERLOADED,
  /* an assignment */
  NIC_ASSIGNMENT,
  /* a comma operator */
  NIC_COMMA,
  /* a call to a constructor */
  NIC_CONSTRUCTOR,
  /* a transaction expression */
  NIC_TRANSACTION
} non_integral_constant;

/* The various kinds of errors about name-lookup failing. */
typedef enum name_lookup_error {
  /* NULL */
  NLE_NULL,
  /* is not a type */
  NLE_TYPE,
  /* is not a class or namespace */
  NLE_CXX98,
  /* is not a class, namespace, or enumeration */
  NLE_NOT_CXX98
} name_lookup_error;

/* The various kinds of required token */
typedef enum required_token {
  RT_NONE,
  RT_SEMICOLON,    /* ';' */
  RT_OPEN_PAREN,   /* '(' */
  RT_CLOSE_BRACE,  /* '}' */
  RT_OPEN_BRACE,   /* '{' */
  RT_CLOSE_SQUARE, /* ']' */
  RT_OPEN_SQUARE,  /* '[' */
  RT_COMMA,        /* ',' */
  RT_SCOPE,        /* '::' */
  RT_LESS,         /* '<' */
  RT_GREATER,      /* '>' */
  RT_EQ,           /* '=' */
  RT_ELLIPSIS,     /* '...' 
*/ RT_MULT, /* '*' */ RT_COMPL, /* '~' */ RT_COLON, /* ':' */ RT_COLON_SCOPE, /* ':' or '::' */ RT_CLOSE_PAREN, /* ')' */ RT_COMMA_CLOSE_PAREN, /* ',' or ')' */ RT_PRAGMA_EOL, /* end of line */ RT_NAME, /* identifier */ /* The type is CPP_KEYWORD */ RT_NEW, /* new */ RT_DELETE, /* delete */ RT_RETURN, /* return */ RT_WHILE, /* while */ RT_EXTERN, /* extern */ RT_STATIC_ASSERT, /* static_assert */ RT_DECLTYPE, /* decltype */ RT_OPERATOR, /* operator */ RT_CLASS, /* class */ RT_TEMPLATE, /* template */ RT_NAMESPACE, /* namespace */ RT_USING, /* using */ RT_ASM, /* asm */ RT_TRY, /* try */ RT_CATCH, /* catch */ RT_THROW, /* throw */ RT_LABEL, /* __label__ */ RT_AT_TRY, /* @try */ RT_AT_SYNCHRONIZED, /* @synchronized */ RT_AT_THROW, /* @throw */ RT_SELECT, /* selection-statement */ RT_INTERATION, /* iteration-statement */ RT_JUMP, /* jump-statement */ RT_CLASS_KEY, /* class-key */ RT_CLASS_TYPENAME_TEMPLATE, /* class, typename, or template */ RT_TRANSACTION_ATOMIC, /* __transaction_atomic */ RT_TRANSACTION_RELAXED, /* __transaction_relaxed */ RT_TRANSACTION_CANCEL /* __transaction_cancel */ } required_token; /* Prototypes. 
*/

static cp_lexer *cp_lexer_new_main
  (void);
static cp_lexer *cp_lexer_new_from_tokens
  (cp_token_cache *tokens);
static void cp_lexer_destroy
  (cp_lexer *);
static int cp_lexer_saving_tokens
  (const cp_lexer *);
static cp_token *cp_lexer_token_at
  (cp_lexer *, cp_token_position);
static void cp_lexer_get_preprocessor_token
  (cp_lexer *, cp_token *);
static inline cp_token *cp_lexer_peek_token
  (cp_lexer *);
static cp_token *cp_lexer_peek_nth_token
  (cp_lexer *, size_t);
static inline bool cp_lexer_next_token_is
  (cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_not
  (cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_keyword
  (cp_lexer *, enum rid);
static cp_token *cp_lexer_consume_token
  (cp_lexer *);
static void cp_lexer_purge_token
  (cp_lexer *);
static void cp_lexer_purge_tokens_after
  (cp_lexer *, cp_token_position);
static void cp_lexer_save_tokens
  (cp_lexer *);
static void cp_lexer_commit_tokens
  (cp_lexer *);
static void cp_lexer_rollback_tokens
  (cp_lexer *);
static void cp_lexer_print_token
  (FILE *, cp_token *);
static inline bool cp_lexer_debugging_p
  (cp_lexer *);
static void cp_lexer_start_debugging
  (cp_lexer *) ATTRIBUTE_UNUSED;
static void cp_lexer_stop_debugging
  (cp_lexer *) ATTRIBUTE_UNUSED;

static cp_token_cache *cp_token_cache_new
  (cp_token *, cp_token *);

static void cp_parser_initial_pragma
  (cp_token *);

static tree cp_literal_operator_id
  (const char *);

/* Manifest constants.  */
/* Initial token-buffer allocation, expressed as a byte budget divided
   by the token size so the buffer starts at roughly 256kB.  */
#define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token))
/* Initial depth of the saved-token-position stack used for tentative
   parsing.  */
#define CP_SAVED_TOKEN_STACK 5

/* Variables.  */

/* The stream to which debugging output should be written.  */
static FILE *cp_lexer_debug_stream;

/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof.  */
int cp_unevaluated_operand;

/* Dump up to NUM tokens in BUFFER to FILE starting with token
   START_TOKEN.  If START_TOKEN is NULL, the dump starts with the
   first token in BUFFER. If NUM is 0, dump all the tokens.
If CURR_TOKEN is set and it is one of the tokens in BUFFER, it will
   be highlighted by surrounding it in [[ ]].  */

static void
cp_lexer_dump_tokens (FILE *file, VEC(cp_token,gc) *buffer,
		      cp_token *start_token, unsigned num,
		      cp_token *curr_token)
{
  unsigned i, nprinted;
  cp_token *token;
  bool do_print;

  fprintf (file, "%u tokens\n", VEC_length (cp_token, buffer));

  if (buffer == NULL)
    return;

  if (num == 0)
    num = VEC_length (cp_token, buffer);

  if (start_token == NULL)
    start_token = VEC_address (cp_token, buffer);

  /* Show an ellipsis when the dump does not start at the first token.  */
  if (start_token > VEC_address (cp_token, buffer))
    {
      cp_lexer_print_token (file, VEC_index (cp_token, buffer, 0));
      fprintf (file, " ... ");
    }

  do_print = false;
  nprinted = 0;
  for (i = 0; VEC_iterate (cp_token, buffer, i, token) && nprinted < num; i++)
    {
      if (token == start_token)
	do_print = true;

      if (!do_print)
	continue;

      nprinted++;
      /* Highlight CURR_TOKEN with [[ ]].  */
      if (token == curr_token)
	fprintf (file, "[[");

      cp_lexer_print_token (file, token);

      if (token == curr_token)
	fprintf (file, "]]");

      /* Break lines at tokens that usually end a logical line.  */
      switch (token->type)
	{
	  case CPP_SEMICOLON:
	  case CPP_OPEN_BRACE:
	  case CPP_CLOSE_BRACE:
	  case CPP_EOF:
	    fputc ('\n', file);
	    break;

	  default:
	    fputc (' ', file);
	}
    }

  /* Show a trailing ellipsis when tokens remain beyond the window.  */
  if (i == num && i < VEC_length (cp_token, buffer))
    {
      fprintf (file, " ... ");
      cp_lexer_print_token (file, VEC_index (cp_token, buffer,
			    VEC_length (cp_token, buffer) - 1));
    }

  fprintf (file, "\n");
}


/* Dump all tokens in BUFFER to stderr.  */

void
cp_lexer_debug_tokens (VEC(cp_token,gc) *buffer)
{
  cp_lexer_dump_tokens (stderr, buffer, NULL, 0, NULL);
}


/* Dump the cp_parser tree field T to FILE if T is non-NULL.  DESC is the
   description for T.  */

static void
cp_debug_print_tree_if_set (FILE *file, const char *desc, tree t)
{
  if (t)
    {
      fprintf (file, "%s: ", desc);
      print_node_brief (file, "", t, 0);
    }
}


/* Dump parser context C to FILE.
*/

static void
cp_debug_print_context (FILE *file, cp_parser_context *c)
{
  /* Indexed by the cp_parser_status_kind of C.  */
  const char *status_s[] = { "OK", "ERROR", "COMMITTED" };
  fprintf (file, "{ status = %s, scope = ", status_s[c->status]);
  print_node_brief (file, "", c->object_type, 0);
  fprintf (file, "}\n");
}


/* Print the stack of parsing contexts to FILE starting with FIRST.  */

static void
cp_debug_print_context_stack (FILE *file, cp_parser_context *first)
{
  unsigned i;
  cp_parser_context *c;

  fprintf (file, "Parsing context stack:\n");
  for (i = 0, c = first; c; c = c->next, i++)
    {
      fprintf (file, "\t#%u: ", i);
      cp_debug_print_context (file, c);
    }
}


/* Print the value of FLAG to FILE.  DESC is a string describing the flag.
   Nothing is printed for a false flag.  */

static void
cp_debug_print_flag (FILE *file, const char *desc, bool flag)
{
  if (flag)
    fprintf (file, "%s: true\n", desc);
}


/* Print an unparsed function entry UF to FILE.  */

static void
cp_debug_print_unparsed_function (FILE *file, cp_unparsed_functions_entry *uf)
{
  unsigned i;
  cp_default_arg_entry *default_arg_fn;
  tree fn;

  fprintf (file, "\tFunctions with default args:\n");
  for (i = 0;
       VEC_iterate (cp_default_arg_entry, uf->funs_with_default_args, i,
		    default_arg_fn);
       i++)
    {
      fprintf (file, "\t\tClass type: ");
      print_node_brief (file, "", default_arg_fn->class_type, 0);
      fprintf (file, "\t\tDeclaration: ");
      print_node_brief (file, "", default_arg_fn->decl, 0);
      fprintf (file, "\n");
    }

  fprintf (file, "\n\tFunctions with definitions that require "
	   "post-processing\n\t\t");
  for (i = 0; VEC_iterate (tree, uf->funs_with_definitions, i, fn); i++)
    {
      print_node_brief (file, "", fn, 0);
      fprintf (file, " ");
    }
  fprintf (file, "\n");

  fprintf (file, "\n\tNon-static data members with initializers that require "
           "post-processing\n\t\t");
  for (i = 0; VEC_iterate (tree, uf->nsdmis, i, fn); i++)
    {
      print_node_brief (file, "", fn, 0);
      fprintf (file, " ");
    }
  fprintf (file, "\n");
}


/* Print the stack of unparsed member functions S to FILE.
*/

static void
cp_debug_print_unparsed_queues (FILE *file,
				VEC(cp_unparsed_functions_entry, gc) *s)
{
  unsigned i;
  cp_unparsed_functions_entry *uf;

  fprintf (file, "Unparsed functions\n");
  for (i = 0; VEC_iterate (cp_unparsed_functions_entry, s, i, uf); i++)
    {
      fprintf (file, "#%u:\n", i);
      cp_debug_print_unparsed_function (file, uf);
    }
}


/* Dump the tokens in a window of size WINDOW_SIZE around the next_token for
   the given PARSER.  If FILE is NULL, the output is printed on stderr. */

static void
cp_debug_parser_tokens (FILE *file, cp_parser *parser, int window_size)
{
  cp_token *next_token, *first_token, *start_token;

  if (file == NULL)
    file = stderr;

  next_token = parser->lexer->next_token;
  first_token = VEC_address (cp_token, parser->lexer->buffer);
  /* Center the window on next_token, clamping at the buffer start.  */
  start_token = (next_token > first_token + window_size / 2)
		? next_token - window_size / 2
		: first_token;
  cp_lexer_dump_tokens (file, parser->lexer->buffer, start_token, window_size,
			next_token);
}


/* Dump debugging information for the given PARSER.  If FILE is NULL,
   the output is printed on stderr.
*/ void cp_debug_parser (FILE *file, cp_parser *parser) { const size_t window_size = 20; cp_token *token; expanded_location eloc; if (file == NULL) file = stderr; fprintf (file, "Parser state\n\n"); fprintf (file, "Number of tokens: %u\n", VEC_length (cp_token, parser->lexer->buffer)); cp_debug_print_tree_if_set (file, "Lookup scope", parser->scope); cp_debug_print_tree_if_set (file, "Object scope", parser->object_scope); cp_debug_print_tree_if_set (file, "Qualifying scope", parser->qualifying_scope); cp_debug_print_context_stack (file, parser->context); cp_debug_print_flag (file, "Allow GNU extensions", parser->allow_gnu_extensions_p); cp_debug_print_flag (file, "'>' token is greater-than", parser->greater_than_is_operator_p); cp_debug_print_flag (file, "Default args allowed in current " "parameter list", parser->default_arg_ok_p); cp_debug_print_flag (file, "Parsing integral constant-expression", parser->integral_constant_expression_p); cp_debug_print_flag (file, "Allow non-constant expression in current " "constant-expression", parser->allow_non_integral_constant_expression_p); cp_debug_print_flag (file, "Seen non-constant expression", parser->non_integral_constant_expression_p); cp_debug_print_flag (file, "Local names and 'this' forbidden in " "current context", parser->local_variables_forbidden_p); cp_debug_print_flag (file, "In unbraced linkage specification", parser->in_unbraced_linkage_specification_p); cp_debug_print_flag (file, "Parsing a declarator", parser->in_declarator_p); cp_debug_print_flag (file, "In template argument list", parser->in_template_argument_list_p); cp_debug_print_flag (file, "Parsing an iteration statement", parser->in_statement & IN_ITERATION_STMT); cp_debug_print_flag (file, "Parsing a switch statement", parser->in_statement & IN_SWITCH_STMT); cp_debug_print_flag (file, "Parsing a structured OpenMP block", parser->in_statement & IN_OMP_BLOCK); cp_debug_print_flag (file, "Parsing a an OpenMP loop", parser->in_statement & IN_OMP_FOR); 
cp_debug_print_flag (file, "Parsing an if statement", parser->in_statement & IN_IF_STMT); cp_debug_print_flag (file, "Parsing a type-id in an expression " "context", parser->in_type_id_in_expr_p); cp_debug_print_flag (file, "Declarations are implicitly extern \"C\"", parser->implicit_extern_c); cp_debug_print_flag (file, "String expressions should be translated " "to execution character set", parser->translate_strings_p); cp_debug_print_flag (file, "Parsing function body outside of a " "local class", parser->in_function_body); cp_debug_print_flag (file, "Auto correct a colon to a scope operator", parser->colon_corrects_to_scope_p); if (parser->type_definition_forbidden_message) fprintf (file, "Error message for forbidden type definitions: %s\n", parser->type_definition_forbidden_message); cp_debug_print_unparsed_queues (file, parser->unparsed_queues); fprintf (file, "Number of class definitions in progress: %u\n", parser->num_classes_being_defined); fprintf (file, "Number of template parameter lists for the current " "declaration: %u\n", parser->num_template_parameter_lists); cp_debug_parser_tokens (file, parser, window_size); token = parser->lexer->next_token; fprintf (file, "Next token to parse:\n"); fprintf (file, "\tToken: "); cp_lexer_print_token (file, token); eloc = expand_location (token->location); fprintf (file, "\n\tFile: %s\n", eloc.file); fprintf (file, "\tLine: %d\n", eloc.line); fprintf (file, "\tColumn: %d\n", eloc.column); } /* Allocate memory for a new lexer object and return it. */ static cp_lexer * cp_lexer_alloc (void) { cp_lexer *lexer; c_common_no_more_pch (); /* Allocate the memory. */ lexer = ggc_alloc_cleared_cp_lexer (); /* Initially we are not debugging. */ lexer->debugging_p = false; lexer->saved_tokens = VEC_alloc (cp_token_position, heap, CP_SAVED_TOKEN_STACK); /* Create the buffer. 
*/ lexer->buffer = VEC_alloc (cp_token, gc, CP_LEXER_BUFFER_SIZE); return lexer; } /* Create a new main C++ lexer, the lexer that gets tokens from the preprocessor. */ static cp_lexer * cp_lexer_new_main (void) { cp_lexer *lexer; cp_token token; /* It's possible that parsing the first pragma will load a PCH file, which is a GC collection point. So we have to do that before allocating any memory. */ cp_parser_initial_pragma (&token); lexer = cp_lexer_alloc (); /* Put the first token in the buffer. */ VEC_quick_push (cp_token, lexer->buffer, &token); /* Get the remaining tokens from the preprocessor. */ while (token.type != CPP_EOF) { cp_lexer_get_preprocessor_token (lexer, &token); VEC_safe_push (cp_token, gc, lexer->buffer, &token); } lexer->last_token = VEC_address (cp_token, lexer->buffer) + VEC_length (cp_token, lexer->buffer) - 1; lexer->next_token = VEC_length (cp_token, lexer->buffer) ? VEC_address (cp_token, lexer->buffer) : &eof_token; /* Subsequent preprocessor diagnostics should use compiler diagnostic functions to get the compiler source location. */ done_lexing = true; gcc_assert (!lexer->next_token->purged_p); return lexer; } /* Create a new lexer whose token stream is primed with the tokens in CACHE. When these tokens are exhausted, no new tokens will be read. */ static cp_lexer * cp_lexer_new_from_tokens (cp_token_cache *cache) { cp_token *first = cache->first; cp_token *last = cache->last; cp_lexer *lexer = ggc_alloc_cleared_cp_lexer (); /* We do not own the buffer. */ lexer->buffer = NULL; lexer->next_token = first == last ? &eof_token : first; lexer->last_token = last; lexer->saved_tokens = VEC_alloc (cp_token_position, heap, CP_SAVED_TOKEN_STACK); /* Initially we are not debugging. */ lexer->debugging_p = false; gcc_assert (!lexer->next_token->purged_p); return lexer; } /* Frees all resources associated with LEXER. 
*/

static void
cp_lexer_destroy (cp_lexer *lexer)
{
  VEC_free (cp_token, gc, lexer->buffer);
  VEC_free (cp_token_position, heap, lexer->saved_tokens);
  ggc_free (lexer);
}

/* Returns nonzero if debugging information should be output.  */

static inline bool
cp_lexer_debugging_p (cp_lexer *lexer)
{
  return lexer->debugging_p;
}

/* Return the position of the token that would be returned next
   (PREVIOUS_P false) or the one just before it (PREVIOUS_P true).
   Relies on bool converting to 0/1 in the pointer arithmetic.  */

static inline cp_token_position
cp_lexer_token_position (cp_lexer *lexer, bool previous_p)
{
  gcc_assert (!previous_p || lexer->next_token != &eof_token);

  return lexer->next_token - previous_p;
}

/* Return the token at position POS.  */

static inline cp_token *
cp_lexer_token_at (cp_lexer *lexer ATTRIBUTE_UNUSED, cp_token_position pos)
{
  return pos;
}

/* Reset LEXER so that POS is the next token to be returned.  */

static inline void
cp_lexer_set_token_position (cp_lexer *lexer, cp_token_position pos)
{
  lexer->next_token = cp_lexer_token_at (lexer, pos);
}

/* Return the position of the most recently consumed token.  */

static inline cp_token_position
cp_lexer_previous_token_position (cp_lexer *lexer)
{
  if (lexer->next_token == &eof_token)
    return lexer->last_token - 1;
  else
    return cp_lexer_token_position (lexer, true);
}

/* Return the most recently consumed token.  */

static inline cp_token *
cp_lexer_previous_token (cp_lexer *lexer)
{
  cp_token_position tp = cp_lexer_previous_token_position (lexer);

  return cp_lexer_token_at (lexer, tp);
}

/* nonzero if we are presently saving tokens.  */

static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
  return VEC_length (cp_token_position, lexer->saved_tokens) != 0;
}

/* Store the next token from the preprocessor in *TOKEN.  If LEXER
   is NULL, assume we are handling an initial #pragma pch_preprocess,
   and thus want the lexer to return processed strings.
   (NOTE(review): an older version of this comment claimed a boolean
   return on EOF; the function returns void.)  */

static void
cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token)
{
  /* Tracks nesting of implicit extern "C" regions across calls.  */
  static int is_extern_c = 0;

   /* Get a new token from the preprocessor.  */
  token->type
    = c_lex_with_flags (&token->u.value, &token->location, &token->flags,
			lexer == NULL ? 0 : C_LEX_STRING_NO_JOIN);
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  token->purged_p = false;

  /* On some systems, some header files are surrounded by an
     implicit extern "C" block.  Set a flag in the token if it
     comes from such a header.  */
  is_extern_c += pending_lang_change;
  pending_lang_change = 0;
  token->implicit_extern_c = is_extern_c > 0;

  /* Check to see if this token is a keyword.  */
  if (token->type == CPP_NAME)
    {
      if (C_IS_RESERVED_WORD (token->u.value))
	{
	  /* Mark this token as a keyword.  */
	  token->type = CPP_KEYWORD;
	  /* Record which keyword.  */
	  token->keyword = C_RID_CODE (token->u.value);
	}
      else
	{
          if (warn_cxx0x_compat
              && C_RID_CODE (token->u.value) >= RID_FIRST_CXX0X
              && C_RID_CODE (token->u.value) <= RID_LAST_CXX0X)
            {
              /* Warn about the C++0x keyword (but still treat it as
                 an identifier).  */
              warning (OPT_Wc__0x_compat,
                       "identifier %qE is a keyword in C++11",
                       token->u.value);

              /* Clear out the C_RID_CODE so we don't warn about this
                 particular identifier-turned-keyword again.  */
              C_SET_RID_CODE (token->u.value, RID_MAX);
            }

	  token->ambiguous_p = false;
	  token->keyword = RID_MAX;
	}
    }
  else if (token->type == CPP_AT_NAME)
    {
      /* This only happens in Objective-C++; it must be a keyword.  */
      token->type = CPP_KEYWORD;
      switch (C_RID_CODE (token->u.value))
	{
	  /* Replace 'class' with '@class', 'private' with '@private',
	     etc.  This prevents confusion with the C++ keyword
	     'class', and makes the tokens consistent with other
	     Objective-C 'AT' keywords.  For example '@class' is
	     reported as RID_AT_CLASS which is consistent with
	     '@synchronized', which is reported as
	     RID_AT_SYNCHRONIZED.  */
	case RID_CLASS:     token->keyword = RID_AT_CLASS; break;
	case RID_PRIVATE:   token->keyword = RID_AT_PRIVATE; break;
	case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
	case RID_PUBLIC:    token->keyword = RID_AT_PUBLIC; break;
	case RID_THROW:     token->keyword = RID_AT_THROW; break;
	case RID_TRY:       token->keyword = RID_AT_TRY; break;
	case RID_CATCH:     token->keyword = RID_AT_CATCH; break;
	default:            token->keyword = C_RID_CODE (token->u.value);
	}
    }
  else if (token->type == CPP_PRAGMA)
    {
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = ((enum pragma_kind)
			    TREE_INT_CST_LOW (token->u.value));
      token->u.value = NULL_TREE;
    }
}

/* Update the globals input_location and the input file stack from TOKEN.  */

static inline void
cp_lexer_set_source_position_from_token (cp_token *token)
{
  if (token->type != CPP_EOF)
    {
      input_location = token->location;
    }
}

/* Return a pointer to the next token in the token stream, but do not
   consume it.  */

static inline cp_token *
cp_lexer_peek_token (cp_lexer *lexer)
{
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, lexer->next_token);
      putc ('\n', cp_lexer_debug_stream);
    }
  return lexer->next_token;
}

/* Return true if the next token has the indicated TYPE.  */

static inline bool
cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type)
{
  return cp_lexer_peek_token (lexer)->type == type;
}

/* Return true if the next token does not have the indicated TYPE.  */

static inline bool
cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type)
{
  return !cp_lexer_next_token_is (lexer, type);
}

/* Return true if the next token is the indicated KEYWORD.  */

static inline bool
cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword)
{
  return cp_lexer_peek_token (lexer)->keyword == keyword;
}

/* Return true if the next token is not the indicated KEYWORD.
*/

static inline bool
cp_lexer_next_token_is_not_keyword (cp_lexer* lexer, enum rid keyword)
{
  return cp_lexer_peek_token (lexer)->keyword != keyword;
}

/* Return true if the next token is a keyword for a decl-specifier.  */

static bool
cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer)
{
  cp_token *token;

  token = cp_lexer_peek_token (lexer);
  switch (token->keyword)
    {
      /* auto specifier: storage-class-specifier in C++,
         simple-type-specifier in C++0x.  */
    case RID_AUTO:
      /* Storage classes.  */
    case RID_REGISTER:
    case RID_STATIC:
    case RID_EXTERN:
    case RID_MUTABLE:
    case RID_THREAD:
      /* Elaborated type specifiers.  */
    case RID_ENUM:
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
    case RID_TYPENAME:
      /* Simple type specifiers.  */
    case RID_CHAR:
    case RID_CHAR16:
    case RID_CHAR32:
    case RID_WCHAR:
    case RID_BOOL:
    case RID_SHORT:
    case RID_INT:
    case RID_LONG:
    case RID_INT128:
    case RID_SIGNED:
    case RID_UNSIGNED:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
      /* GNU extensions.  */
    case RID_ATTRIBUTE:
    case RID_TYPEOF:
      /* C++0x extensions.  */
    case RID_DECLTYPE:
    case RID_UNDERLYING_TYPE:
      return true;

    default:
      return false;
    }
}

/* Returns TRUE iff the token T begins a decltype type.  */

static bool
token_is_decltype (cp_token *t)
{
  return (t->keyword == RID_DECLTYPE
	  || t->type == CPP_DECLTYPE);
}

/* Returns TRUE iff the next token begins a decltype type.  */

static bool
cp_lexer_next_token_is_decltype (cp_lexer *lexer)
{
  cp_token *t = cp_lexer_peek_token (lexer);
  return token_is_decltype (t);
}

/* Return a pointer to the Nth token in the token stream.  If N is 1,
   then this is precisely equivalent to cp_lexer_peek_token (except
   that it is not inline).  One would like to disallow that case, but
   there is one case (cp_parser_nth_token_starts_template_id) where
   the caller passes a variable for N and it might be 1.  */

static cp_token *
cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n)
{
  cp_token *token;

  /* N is 1-based, not zero-based.  */
  gcc_assert (n > 0);

  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream,
	     "cp_lexer: peeking ahead %ld at token: ", (long)n);

  --n;
  token = lexer->next_token;
  gcc_assert (!n || token != &eof_token);
  /* Walk forward, skipping purged tokens, until N live tokens have
     been passed or the end of the buffer is reached.  */
  while (n != 0)
    {
      ++token;
      if (token == lexer->last_token)
	{
	  token = &eof_token;
	  break;
	}

      if (!token->purged_p)
	--n;
    }

  if (cp_lexer_debugging_p (lexer))
    {
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}

/* Return the next token, and advance the lexer's next_token pointer
   to point to the next non-purged token.  */

static cp_token *
cp_lexer_consume_token (cp_lexer* lexer)
{
  cp_token *token = lexer->next_token;

  gcc_assert (token != &eof_token);
  gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL);

  do
    {
      lexer->next_token++;
      if (lexer->next_token == lexer->last_token)
	{
	  lexer->next_token = &eof_token;
	  break;
	}

    }
  while (lexer->next_token->purged_p);

  cp_lexer_set_source_position_from_token (token);

  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}

/* Permanently remove the next token from the token stream, and
   advance the next_token pointer to refer to the next non-purged
   token.  */

static void
cp_lexer_purge_token (cp_lexer *lexer)
{
  cp_token *tok = lexer->next_token;

  gcc_assert (tok != &eof_token);
  tok->purged_p = true;
  tok->location = UNKNOWN_LOCATION;
  tok->u.value = NULL_TREE;
  tok->keyword = RID_MAX;

  do
    {
      tok++;
      if (tok == lexer->last_token)
	{
	  tok = &eof_token;
	  break;
	}
    }
  while (tok->purged_p);

  lexer->next_token = tok;
}

/* Permanently remove all tokens after TOK, up to, but not
   including, the token that will be returned next by
   cp_lexer_peek_token.
*/

static void
cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok)
{
  cp_token *peek = lexer->next_token;

  if (peek == &eof_token)
    peek = lexer->last_token;

  gcc_assert (tok < peek);

  for ( tok += 1; tok != peek; tok += 1)
    {
      tok->purged_p = true;
      tok->location = UNKNOWN_LOCATION;
      tok->u.value = NULL_TREE;
      tok->keyword = RID_MAX;
    }
}

/* Begin saving tokens.  All tokens consumed after this point will be
   preserved.  */

static void
cp_lexer_save_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");

  VEC_safe_push (cp_token_position, heap,
		 lexer->saved_tokens, lexer->next_token);
}

/* Commit to the portion of the token stream most recently saved.  */

static void
cp_lexer_commit_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");

  VEC_pop (cp_token_position, lexer->saved_tokens);
}

/* Return all tokens saved since the last call to cp_lexer_save_tokens
   to the token stream.  Stop saving tokens.  */

static void
cp_lexer_rollback_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");

  lexer->next_token = VEC_pop (cp_token_position, lexer->saved_tokens);
}

/* Print a representation of the TOKEN on the STREAM.  */

static void
cp_lexer_print_token (FILE * stream, cp_token *token)
{
  /* We don't use cpp_type2name here because the parser defines
     a few tokens of its own.  */
  static const char *const token_names[] = {
    /* cpplib-defined token types */
#define OP(e, s) #e,
#define TK(e, s) #e,
    TTYPE_TABLE
#undef OP
#undef TK
    /* C++ parser token types - see "Manifest constants", above.  */
    "KEYWORD",
    "TEMPLATE_ID",
    "NESTED_NAME_SPECIFIER",
  };

  /* For some tokens, print the associated data.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      /* Some keywords have a value that is not an IDENTIFIER_NODE.
	 For example, `struct' is mapped to an INTEGER_CST.  */
      if (TREE_CODE (token->u.value) != IDENTIFIER_NODE)
	break;
      /* else fall through */
    case CPP_NAME:
      fputs (IDENTIFIER_POINTER (token->u.value), stream);
      break;

    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value));
      break;

    case CPP_NUMBER:
      print_generic_expr (stream, token->u.value, 0);
      break;

    default:
      /* If we have a name for the token, print it out.  Otherwise, we
	 simply give the numeric code.  */
      if (token->type < ARRAY_SIZE(token_names))
	fputs (token_names[token->type], stream);
      else
	fprintf (stream, "[%d]", token->type);
      break;
    }
}

/* Start emitting debugging information.  */

static void
cp_lexer_start_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = true;
  cp_lexer_debug_stream = stderr;
}

/* Stop emitting debugging information.  */

static void
cp_lexer_stop_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = false;
  cp_lexer_debug_stream = NULL;
}

/* Create a new cp_token_cache, representing a range of tokens.  */

static cp_token_cache *
cp_token_cache_new (cp_token *first, cp_token *last)
{
  cp_token_cache *cache = ggc_alloc_cp_token_cache ();
  cache->first = first;
  cache->last = last;
  return cache;
}


/* Decl-specifiers.  */

/* Set *DECL_SPECS to represent an empty decl-specifier-seq.  */

static void
clear_decl_specs (cp_decl_specifier_seq *decl_specs)
{
  memset (decl_specs, 0, sizeof (cp_decl_specifier_seq));
}


/* Declarators.  */

/* Nothing other than the parser should be creating declarators;
   declarators are a semi-syntactic representation of C++ entities.
   Other parts of the front end that need to create entities (like
   VAR_DECLs or FUNCTION_DECLs) should do that directly.
*/

static cp_declarator *make_call_declarator
  (cp_declarator *, tree, cp_cv_quals, cp_virt_specifiers, tree, tree);
static cp_declarator *make_array_declarator
  (cp_declarator *, tree);
static cp_declarator *make_pointer_declarator
  (cp_cv_quals, cp_declarator *);
static cp_declarator *make_reference_declarator
  (cp_cv_quals, cp_declarator *, bool);
static cp_parameter_declarator *make_parameter_declarator
  (cp_decl_specifier_seq *, cp_declarator *, tree);
static cp_declarator *make_ptrmem_declarator
  (cp_cv_quals, tree, cp_declarator *);

/* An erroneous declarator.  */
static cp_declarator *cp_error_declarator;

/* The obstack on which declarators and related data structures are
   allocated.  */
static struct obstack declarator_obstack;

/* Alloc BYTES from the declarator memory pool.  */

static inline void *
alloc_declarator (size_t bytes)
{
  return obstack_alloc (&declarator_obstack, bytes);
}

/* Allocate a declarator of the indicated KIND.  Clear fields that are
   common to all declarators.  */

static cp_declarator *
make_declarator (cp_declarator_kind kind)
{
  cp_declarator *declarator;

  declarator = (cp_declarator *) alloc_declarator (sizeof (cp_declarator));
  declarator->kind = kind;
  declarator->attributes = NULL_TREE;
  declarator->declarator = NULL;
  declarator->parameter_pack_p = false;
  declarator->id_loc = UNKNOWN_LOCATION;

  return declarator;
}

/* Make a declarator for a generalized identifier.  If
   QUALIFYING_SCOPE is non-NULL, the identifier is
   QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just
   UNQUALIFIED_NAME.  SFK indicates the kind of special function this
   is, if any.  */

static cp_declarator *
make_id_declarator (tree qualifying_scope, tree unqualified_name,
		    special_function_kind sfk)
{
  cp_declarator *declarator;

  /* It is valid to write:

       class C { void f(); };
       typedef C D;
       void D::f();

     The standard is not clear about whether `typedef const C D' is
     legal; as of 2002-09-15 the committee is considering that
     question.  EDG 3.0 allows that syntax.  Therefore, we do as
     well.  */
  if (qualifying_scope && TYPE_P (qualifying_scope))
    qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope);

  gcc_assert (TREE_CODE (unqualified_name) == IDENTIFIER_NODE
	      || TREE_CODE (unqualified_name) == BIT_NOT_EXPR
	      || TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR);

  declarator = make_declarator (cdk_id);
  declarator->u.id.qualifying_scope = qualifying_scope;
  declarator->u.id.unqualified_name = unqualified_name;
  declarator->u.id.sfk = sfk;

  return declarator;
}

/* Make a declarator for a pointer to TARGET.  CV_QUALIFIERS is a list
   of modifiers such as const or volatile to apply to the pointer
   type, represented as identifiers.  */

cp_declarator *
make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_pointer);
  declarator->declarator = target;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = NULL_TREE;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      /* A pointer declarator inherits, and takes over, pack-ness from
	 its target declarator.  */
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Like make_pointer_declarator -- but for references.  */

cp_declarator *
make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target,
			   bool rvalue_ref)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_reference);
  declarator->declarator = target;
  declarator->u.reference.qualifiers = cv_qualifiers;
  declarator->u.reference.rvalue_ref = rvalue_ref;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Like make_pointer_declarator -- but for a pointer to a non-static
   member of CLASS_TYPE.
*/

cp_declarator *
make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type,
			cp_declarator *pointee)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_ptrmem);
  declarator->declarator = pointee;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = class_type;

  if (pointee)
    {
      declarator->parameter_pack_p = pointee->parameter_pack_p;
      pointee->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Make a declarator for the function given by TARGET, with the
   indicated PARMS.  The CV_QUALIFIERS apply to the function, as in
   "const"-qualified member function.  The EXCEPTION_SPECIFICATION
   indicates what exceptions can be thrown.  */

cp_declarator *
make_call_declarator (cp_declarator *target,
		      tree parms,
		      cp_cv_quals cv_qualifiers,
		      cp_virt_specifiers virt_specifiers,
		      tree exception_specification,
		      tree late_return_type)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_function);
  declarator->declarator = target;
  declarator->u.function.parameters = parms;
  declarator->u.function.qualifiers = cv_qualifiers;
  declarator->u.function.virt_specifiers = virt_specifiers;
  declarator->u.function.exception_specification = exception_specification;
  declarator->u.function.late_return_type = late_return_type;
  if (target)
    {
      declarator->id_loc = target->id_loc;
      declarator->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Make a declarator for an array of BOUNDS elements, each of which is
   defined by ELEMENT.
*/

cp_declarator *
make_array_declarator (cp_declarator *element, tree bounds)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_array);
  declarator->declarator = element;
  declarator->u.array.bounds = bounds;
  if (element)
    {
      declarator->id_loc = element->id_loc;
      declarator->parameter_pack_p = element->parameter_pack_p;
      element->parameter_pack_p = false;
    }
  else
    declarator->parameter_pack_p = false;

  return declarator;
}

/* Determine whether the declarator we've seen so far can be a
   parameter pack, when followed by an ellipsis.  */
static bool
declarator_can_be_parameter_pack (cp_declarator *declarator)
{
  /* Search for a declarator name, or any other declarator that goes
     after the point where the ellipsis could appear in a parameter
     pack.  If we find any of these, then this declarator can not be
     made into a parameter pack.  */
  bool found = false;
  while (declarator && !found)
    {
      switch ((int)declarator->kind)
	{
	case cdk_id:
	case cdk_array:
	  found = true;
	  break;

	case cdk_error:
	  return true;

	default:
	  declarator = declarator->declarator;
	  break;
	}
    }

  return !found;
}

cp_parameter_declarator *no_parameters;

/* Create a parameter declarator with the indicated DECL_SPECIFIERS,
   DECLARATOR and DEFAULT_ARGUMENT.  */

cp_parameter_declarator *
make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers,
			   cp_declarator *declarator,
			   tree default_argument)
{
  cp_parameter_declarator *parameter;

  parameter = ((cp_parameter_declarator *)
	       alloc_declarator (sizeof (cp_parameter_declarator)));
  parameter->next = NULL;
  if (decl_specifiers)
    parameter->decl_specifiers = *decl_specifiers;
  else
    clear_decl_specs (&parameter->decl_specifiers);
  parameter->declarator = declarator;
  parameter->default_argument = default_argument;
  parameter->ellipsis_p = false;

  return parameter;
}

/* Returns true iff DECLARATOR  is a declaration for a function.
*/

static bool
function_declarator_p (const cp_declarator *declarator)
{
  const cp_declarator *d;

  /* Scan inward through the declarator chain.  A function declarator
     whose direct declarator is an identifier means this declares a
     function; hitting a bare identifier or an error first means it
     does not.  */
  for (d = declarator; d; d = d->declarator)
    {
      if (d->kind == cdk_function
          && d->declarator->kind == cdk_id)
        return true;
      if (d->kind == cdk_id
          || d->kind == cdk_error)
        return false;
    }
  return false;
}

/* The parser.  */

/* Overview
   --------

   A cp_parser parses the token stream as specified by the C++
   grammar.  Its job is purely parsing, not semantic analysis.  For
   example, the parser breaks the token stream into declarators,
   expressions, statements, and other similar syntactic constructs.
   It does not check that the types of the expressions on either side
   of an assignment-statement are compatible, or that a function is
   not declared with a parameter of type `void'.

   The parser invokes routines elsewhere in the compiler to perform
   semantic analysis and to build up the abstract syntax tree for the
   code processed.

   The parser (and the template instantiation code, which is, in a
   way, a close relative of parsing) are the only parts of the
   compiler that should be calling push_scope and pop_scope, or
   related functions.  The parser (and template instantiation code)
   keeps track of what scope is presently active; everything else
   should simply honor that.  (The code that generates static
   initializers may also need to set the scope, in order to check
   access control correctly when emitting the initializers.)

   Methodology
   -----------

   The parser is of the standard recursive-descent variety.  Upcoming
   tokens in the token stream are examined in order to determine which
   production to use when parsing a non-terminal.  Some C++ constructs
   require arbitrary look ahead to disambiguate.  For example, it is
   impossible, in the general case, to tell whether a statement is an
   expression or declaration without scanning the entire statement.
   Therefore, the parser is capable of "parsing tentatively."  When the
   parser is not sure what construct comes next, it enters this mode.
   Then, while we attempt to parse the construct, the parser queues up
   error messages, rather than issuing them immediately, and saves the
   tokens it consumes.  If the construct is parsed successfully, the
   parser "commits", i.e., it issues any queued error messages and
   the tokens that were being preserved are permanently discarded.
   If, however, the construct is not parsed successfully, the parser
   rolls back its state completely so that it can resume parsing using
   a different alternative.

   Future Improvements
   -------------------

   The performance of the parser could probably be improved
   substantially.  We could often eliminate the need to parse
   tentatively by looking ahead a little bit.  In some places, this
   approach might not entirely eliminate the need to parse
   tentatively, but it might still speed up the average case.  */

/* Flags that are passed to some parsing functions.  These values can
   be bitwise-ored together.  */

enum
{
  /* No flags.  */
  CP_PARSER_FLAGS_NONE = 0x0,
  /* The construct is optional.  If it is not present, then no error
     should be issued.  */
  CP_PARSER_FLAGS_OPTIONAL = 0x1,
  /* When parsing a type-specifier, treat user-defined type-names
     as non-type identifiers.  */
  CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2,
  /* When parsing a type-specifier, do not try to parse a class-specifier
     or enum-specifier.  */
  CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS = 0x4,
  /* When parsing a decl-specifier-seq, only allow type-specifier or
     constexpr.  */
  CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR = 0x8
};

/* This type is used for parameters and variables which hold
   combinations of the above flags.  */
typedef int cp_parser_flags;

/* The different kinds of declarators we want to parse.  */

typedef enum cp_parser_declarator_kind
{
  /* We want an abstract declarator.  */
  CP_PARSER_DECLARATOR_ABSTRACT,
  /* We want a named declarator.  */
  CP_PARSER_DECLARATOR_NAMED,
  /* We don't mind, but the name must be an unqualified-id.  */
  CP_PARSER_DECLARATOR_EITHER
} cp_parser_declarator_kind;

/* The precedence values used to parse binary expressions.  The
   minimum value of PREC must be 1, because zero is reserved to quickly
   discriminate binary operators from other tokens.  */

enum cp_parser_prec
{
  PREC_NOT_OPERATOR,		/* == 0; not a binary operator (see above).  */
  PREC_LOGICAL_OR_EXPRESSION,
  PREC_LOGICAL_AND_EXPRESSION,
  PREC_INCLUSIVE_OR_EXPRESSION,
  PREC_EXCLUSIVE_OR_EXPRESSION,
  PREC_AND_EXPRESSION,
  PREC_EQUALITY_EXPRESSION,
  PREC_RELATIONAL_EXPRESSION,
  PREC_SHIFT_EXPRESSION,
  PREC_ADDITIVE_EXPRESSION,
  PREC_MULTIPLICATIVE_EXPRESSION,
  PREC_PM_EXPRESSION,
  NUM_PREC_VALUES = PREC_PM_EXPRESSION
};

/* A mapping from a token type to a corresponding tree node type, with a
   precedence value.  */

typedef struct cp_parser_binary_operations_map_node
{
  /* The token type.  */
  enum cpp_ttype token_type;
  /* The corresponding tree code.  */
  enum tree_code tree_type;
  /* The precedence of this operator.  */
  enum cp_parser_prec prec;
} cp_parser_binary_operations_map_node;

typedef struct cp_parser_expression_stack_entry
{
  /* Left hand side of the binary operation we are currently
     parsing.  */
  tree lhs;
  /* Original tree code for left hand side, if it was a binary
     expression itself (used for -Wparentheses).  */
  enum tree_code lhs_type;
  /* Tree code for the binary operation we are parsing.  */
  enum tree_code tree_type;
  /* Precedence of the binary operation we are parsing.  */
  enum cp_parser_prec prec;
} cp_parser_expression_stack_entry;

/* The stack for storing partial expressions.  We only need
   NUM_PREC_VALUES entries because precedence levels on the stack are
   monotonically increasing.  */
typedef struct cp_parser_expression_stack_entry
  cp_parser_expression_stack[NUM_PREC_VALUES];

/* Prototypes.  */

/* Constructors and destructors.  */

static cp_parser_context *cp_parser_context_new
  (cp_parser_context *);

/* Class variables.  */

/* Free list of parser contexts, recycled by cp_parser_context_new;
   GTY((deletable)) lets the garbage collector drop it at will.  */
static GTY((deletable)) cp_parser_context* cp_parser_context_free_list;

/* The operator-precedence table used by cp_parser_binary_expression.
   Transformed into an associative array (binops_by_token) by
   cp_parser_new.  */

static const cp_parser_binary_operations_map_node binops[] = {
  { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION },
  { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION },

  { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },

  { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION },

  { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION },

  { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION },

  { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION },

  { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION },
  { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION },
  { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION },

  { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION },
  { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION }
};

/* The same as binops, but initialized by cp_parser_new so that
   binops_by_token[N].token_type == N.  Used in cp_parser_binary_expression
   for speed.  */
static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES];

/* Constructors and destructors.  */

/* Construct a new context.  The context below this one on the stack
   is given by NEXT.  */

static cp_parser_context *
cp_parser_context_new (cp_parser_context* next)
{
  cp_parser_context *context;

  /* Allocate the storage.  */
  if (cp_parser_context_free_list != NULL)
    {
      /* Pull the first entry from the free list.  */
      context = cp_parser_context_free_list;
      cp_parser_context_free_list = context->next;
      /* Recycled entries are zeroed by hand; fresh ones below come
         back already cleared from the allocator.  */
      memset (context, 0, sizeof (*context));
    }
  else
    context = ggc_alloc_cleared_cp_parser_context ();

  /* No errors have occurred yet in this context.  */
  context->status = CP_PARSER_STATUS_KIND_NO_ERROR;
  /* If this is not the bottommost context, copy information that we
     need from the previous context.  */
  if (next)
    {
      /* If, in the NEXT context, we are parsing an `x->' or `x.'
         expression, then we are parsing one in this context, too.  */
      context->object_type = next->object_type;

      /* Thread the stack.  */
      context->next = next;
    }

  return context;
}

/* Managing the unparsed function queues.  */

#define unparsed_funs_with_default_args \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_default_args
#define unparsed_funs_with_definitions \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->funs_with_definitions
#define unparsed_nsdmis \
  VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues)->nsdmis

/* Push a new (empty) entry onto PARSER's stack of unparsed-function
   queues; only the definitions vector is allocated eagerly.  */
static void
push_unparsed_function_queues (cp_parser *parser)
{
  VEC_safe_push (cp_unparsed_functions_entry, gc,
		 parser->unparsed_queues, NULL);
  unparsed_funs_with_default_args = NULL;
  unparsed_funs_with_definitions = make_tree_vector ();
  unparsed_nsdmis = NULL;
}

/* Pop the topmost entry from PARSER's stack of unparsed-function
   queues, releasing the definitions vector it owns.  */
static void
pop_unparsed_function_queues (cp_parser *parser)
{
  release_tree_vector (unparsed_funs_with_definitions);
  VEC_pop (cp_unparsed_functions_entry, parser->unparsed_queues);
}

/* Prototypes.  */

/* Constructors and destructors.  */

static cp_parser *cp_parser_new
  (void);

/* Routines to parse various constructs.

   Those that return `tree' will return the error_mark_node (rather
   than NULL_TREE) if a parse error occurs, unless otherwise noted.
   Sometimes, they will return an ordinary node if error-recovery was
   attempted, even though a parse error occurred.  So, to check
   whether or not a parse error occurred, you should always use
   cp_parser_error_occurred.
If the construct is optional (indicated either by an `_opt' in the name of the function that does the parsing or via a FLAGS parameter), then NULL_TREE is returned if the construct is not present. */ /* Lexical conventions [gram.lex] */ static tree cp_parser_identifier (cp_parser *); static tree cp_parser_string_literal (cp_parser *, bool, bool); static tree cp_parser_userdef_char_literal (cp_parser *); static tree cp_parser_userdef_string_literal (cp_token *); static tree cp_parser_userdef_numeric_literal (cp_parser *); /* Basic concepts [gram.basic] */ static bool cp_parser_translation_unit (cp_parser *); /* Expressions [gram.expr] */ static tree cp_parser_primary_expression (cp_parser *, bool, bool, bool, cp_id_kind *); static tree cp_parser_id_expression (cp_parser *, bool, bool, bool *, bool, bool); static tree cp_parser_unqualified_id (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier_opt (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier (cp_parser *, bool, bool, bool, bool); static tree cp_parser_qualifying_entity (cp_parser *, bool, bool, bool, bool, bool); static tree cp_parser_postfix_expression (cp_parser *, bool, bool, bool, cp_id_kind *); static tree cp_parser_postfix_open_square_expression (cp_parser *, tree, bool); static tree cp_parser_postfix_dot_deref_expression (cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *, location_t); static VEC(tree,gc) *cp_parser_parenthesized_expression_list (cp_parser *, int, bool, bool, bool *); /* Values for the second parameter of cp_parser_parenthesized_expression_list. 
*/ enum { non_attr = 0, normal_attr = 1, id_attr = 2 }; static void cp_parser_pseudo_destructor_name (cp_parser *, tree *, tree *); static tree cp_parser_unary_expression (cp_parser *, bool, bool, cp_id_kind *); static enum tree_code cp_parser_unary_operator (cp_token *); static tree cp_parser_new_expression (cp_parser *); static VEC(tree,gc) *cp_parser_new_placement (cp_parser *); static tree cp_parser_new_type_id (cp_parser *, tree *); static cp_declarator *cp_parser_new_declarator_opt (cp_parser *); static cp_declarator *cp_parser_direct_new_declarator (cp_parser *); static VEC(tree,gc) *cp_parser_new_initializer (cp_parser *); static tree cp_parser_delete_expression (cp_parser *); static tree cp_parser_cast_expression (cp_parser *, bool, bool, cp_id_kind *); static tree cp_parser_binary_expression (cp_parser *, bool, bool, enum cp_parser_prec, cp_id_kind *); static tree cp_parser_question_colon_clause (cp_parser *, tree); static tree cp_parser_assignment_expression (cp_parser *, bool, cp_id_kind *); static enum tree_code cp_parser_assignment_operator_opt (cp_parser *); static tree cp_parser_expression (cp_parser *, bool, cp_id_kind *); static tree cp_parser_constant_expression (cp_parser *, bool, bool *); static tree cp_parser_builtin_offsetof (cp_parser *); static tree cp_parser_lambda_expression (cp_parser *); static void cp_parser_lambda_introducer (cp_parser *, tree); static bool cp_parser_lambda_declarator_opt (cp_parser *, tree); static void cp_parser_lambda_body (cp_parser *, tree); /* Statements [gram.stmt.stmt] */ static void cp_parser_statement (cp_parser *, tree, bool, bool *); static void cp_parser_label_for_labeled_statement (cp_parser *); static tree cp_parser_expression_statement (cp_parser *, tree); static tree cp_parser_compound_statement (cp_parser *, tree, bool, bool); static void cp_parser_statement_seq_opt (cp_parser *, tree); static tree cp_parser_selection_statement (cp_parser *, bool *); static tree cp_parser_condition (cp_parser *); 
static tree cp_parser_iteration_statement (cp_parser *); static bool cp_parser_for_init_statement (cp_parser *, tree *decl); static tree cp_parser_for (cp_parser *); static tree cp_parser_c_for (cp_parser *, tree, tree); static tree cp_parser_range_for (cp_parser *, tree, tree, tree); static void do_range_for_auto_deduction (tree, tree); static tree cp_parser_perform_range_for_lookup (tree, tree *, tree *); static tree cp_parser_range_for_member_function (tree, tree); static tree cp_parser_jump_statement (cp_parser *); static void cp_parser_declaration_statement (cp_parser *); static tree cp_parser_implicitly_scoped_statement (cp_parser *, bool *); static void cp_parser_already_scoped_statement (cp_parser *); /* Declarations [gram.dcl.dcl] */ static void cp_parser_declaration_seq_opt (cp_parser *); static void cp_parser_declaration (cp_parser *); static void cp_parser_block_declaration (cp_parser *, bool); static void cp_parser_simple_declaration (cp_parser *, bool, tree *); static void cp_parser_decl_specifier_seq (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *); static tree cp_parser_storage_class_specifier_opt (cp_parser *); static tree cp_parser_function_specifier_opt (cp_parser *, cp_decl_specifier_seq *); static tree cp_parser_type_specifier (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool, int *, bool *); static tree cp_parser_simple_type_specifier (cp_parser *, cp_decl_specifier_seq *, cp_parser_flags); static tree cp_parser_type_name (cp_parser *); static tree cp_parser_nonclass_name (cp_parser* parser); static tree cp_parser_elaborated_type_specifier (cp_parser *, bool, bool); static tree cp_parser_enum_specifier (cp_parser *); static void cp_parser_enumerator_list (cp_parser *, tree); static void cp_parser_enumerator_definition (cp_parser *, tree); static tree cp_parser_namespace_name (cp_parser *); static void cp_parser_namespace_definition (cp_parser *); static void cp_parser_namespace_body (cp_parser *); static tree 
cp_parser_qualified_namespace_specifier (cp_parser *); static void cp_parser_namespace_alias_definition (cp_parser *); static bool cp_parser_using_declaration (cp_parser *, bool); static void cp_parser_using_directive (cp_parser *); static tree cp_parser_alias_declaration (cp_parser *); static void cp_parser_asm_definition (cp_parser *); static void cp_parser_linkage_specification (cp_parser *); static void cp_parser_static_assert (cp_parser *, bool); static tree cp_parser_decltype (cp_parser *); /* Declarators [gram.dcl.decl] */ static tree cp_parser_init_declarator (cp_parser *, cp_decl_specifier_seq *, VEC (deferred_access_check,gc)*, bool, bool, int, bool *, tree *); static cp_declarator *cp_parser_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool *, bool); static cp_declarator *cp_parser_direct_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool); static enum tree_code cp_parser_ptr_operator (cp_parser *, tree *, cp_cv_quals *); static cp_cv_quals cp_parser_cv_qualifier_seq_opt (cp_parser *); static cp_virt_specifiers cp_parser_virt_specifier_seq_opt (cp_parser *); static tree cp_parser_late_return_type_opt (cp_parser *, cp_cv_quals); static tree cp_parser_declarator_id (cp_parser *, bool); static tree cp_parser_type_id (cp_parser *); static tree cp_parser_template_type_arg (cp_parser *); static tree cp_parser_trailing_type_id (cp_parser *); static tree cp_parser_type_id_1 (cp_parser *, bool, bool); static void cp_parser_type_specifier_seq (cp_parser *, bool, bool, cp_decl_specifier_seq *); static tree cp_parser_parameter_declaration_clause (cp_parser *); static tree cp_parser_parameter_declaration_list (cp_parser *, bool *); static cp_parameter_declarator *cp_parser_parameter_declaration (cp_parser *, bool, bool *); static tree cp_parser_default_argument (cp_parser *, bool); static void cp_parser_function_body (cp_parser *); static tree cp_parser_initializer (cp_parser *, bool *, bool *); static tree cp_parser_initializer_clause 
(cp_parser *, bool *); static tree cp_parser_braced_list (cp_parser*, bool*); static VEC(constructor_elt,gc) *cp_parser_initializer_list (cp_parser *, bool *); static bool cp_parser_ctor_initializer_opt_and_function_body (cp_parser *); /* Classes [gram.class] */ static tree cp_parser_class_name (cp_parser *, bool, bool, enum tag_types, bool, bool, bool); static tree cp_parser_class_specifier (cp_parser *); static tree cp_parser_class_head (cp_parser *, bool *); static enum tag_types cp_parser_class_key (cp_parser *); static void cp_parser_member_specification_opt (cp_parser *); static void cp_parser_member_declaration (cp_parser *); static tree cp_parser_pure_specifier (cp_parser *); static tree cp_parser_constant_initializer (cp_parser *); /* Derived classes [gram.class.derived] */ static tree cp_parser_base_clause (cp_parser *); static tree cp_parser_base_specifier (cp_parser *); /* Special member functions [gram.special] */ static tree cp_parser_conversion_function_id (cp_parser *); static tree cp_parser_conversion_type_id (cp_parser *); static cp_declarator *cp_parser_conversion_declarator_opt (cp_parser *); static bool cp_parser_ctor_initializer_opt (cp_parser *); static void cp_parser_mem_initializer_list (cp_parser *); static tree cp_parser_mem_initializer (cp_parser *); static tree cp_parser_mem_initializer_id (cp_parser *); /* Overloading [gram.over] */ static tree cp_parser_operator_function_id (cp_parser *); static tree cp_parser_operator (cp_parser *); /* Templates [gram.temp] */ static void cp_parser_template_declaration (cp_parser *, bool); static tree cp_parser_template_parameter_list (cp_parser *); static tree cp_parser_template_parameter (cp_parser *, bool *, bool *); static tree cp_parser_type_parameter (cp_parser *, bool *); static tree cp_parser_template_id (cp_parser *, bool, bool, bool); static tree cp_parser_template_name (cp_parser *, bool, bool, bool, bool *); static tree cp_parser_template_argument_list (cp_parser *); static tree 
cp_parser_template_argument (cp_parser *); static void cp_parser_explicit_instantiation (cp_parser *); static void cp_parser_explicit_specialization (cp_parser *); /* Exception handling [gram.exception] */ static tree cp_parser_try_block (cp_parser *); static bool cp_parser_function_try_block (cp_parser *); static void cp_parser_handler_seq (cp_parser *); static void cp_parser_handler (cp_parser *); static tree cp_parser_exception_declaration (cp_parser *); static tree cp_parser_throw_expression (cp_parser *); static tree cp_parser_exception_specification_opt (cp_parser *); static tree cp_parser_type_id_list (cp_parser *); /* GNU Extensions */ static tree cp_parser_asm_specification_opt (cp_parser *); static tree cp_parser_asm_operand_list (cp_parser *); static tree cp_parser_asm_clobber_list (cp_parser *); static tree cp_parser_asm_label_list (cp_parser *); static tree cp_parser_attributes_opt (cp_parser *); static tree cp_parser_attribute_list (cp_parser *); static bool cp_parser_extension_opt (cp_parser *, int *); static void cp_parser_label_declaration (cp_parser *); /* Transactional Memory Extensions */ static tree cp_parser_transaction (cp_parser *, enum rid); static tree cp_parser_transaction_expression (cp_parser *, enum rid); static bool cp_parser_function_transaction (cp_parser *, enum rid); static tree cp_parser_transaction_cancel (cp_parser *); enum pragma_context { pragma_external, pragma_stmt, pragma_compound }; static bool cp_parser_pragma (cp_parser *, enum pragma_context); /* Objective-C++ Productions */ static tree cp_parser_objc_message_receiver (cp_parser *); static tree cp_parser_objc_message_args (cp_parser *); static tree cp_parser_objc_message_expression (cp_parser *); static tree cp_parser_objc_encode_expression (cp_parser *); static tree cp_parser_objc_defs_expression (cp_parser *); static tree cp_parser_objc_protocol_expression (cp_parser *); static tree cp_parser_objc_selector_expression (cp_parser *); static tree 
cp_parser_objc_expression (cp_parser *); static bool cp_parser_objc_selector_p (enum cpp_ttype); static tree cp_parser_objc_selector (cp_parser *); static tree cp_parser_objc_protocol_refs_opt (cp_parser *); static void cp_parser_objc_declaration (cp_parser *, tree); static tree cp_parser_objc_statement (cp_parser *); static bool cp_parser_objc_valid_prefix_attributes (cp_parser *, tree *); static void cp_parser_objc_at_property_declaration (cp_parser *) ; static void cp_parser_objc_at_synthesize_declaration (cp_parser *) ; static void cp_parser_objc_at_dynamic_declaration (cp_parser *) ; static tree cp_parser_objc_struct_declaration (cp_parser *) ; /* Utility Routines */ static tree cp_parser_lookup_name (cp_parser *, tree, enum tag_types, bool, bool, bool, tree *, location_t); static tree cp_parser_lookup_name_simple (cp_parser *, tree, location_t); static tree cp_parser_maybe_treat_template_as_class (tree, bool); static bool cp_parser_check_declarator_template_parameters (cp_parser *, cp_declarator *, location_t); static bool cp_parser_check_template_parameters (cp_parser *, unsigned, location_t, cp_declarator *); static tree cp_parser_simple_cast_expression (cp_parser *); static tree cp_parser_global_scope_opt (cp_parser *, bool); static bool cp_parser_constructor_declarator_p (cp_parser *, bool); static tree cp_parser_function_definition_from_specifiers_and_declarator (cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *); static tree cp_parser_function_definition_after_declarator (cp_parser *, bool); static void cp_parser_template_declaration_after_export (cp_parser *, bool); static void cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)*); static tree cp_parser_single_declaration (cp_parser *, VEC (deferred_access_check,gc)*, bool, bool, bool *); static tree cp_parser_functional_cast (cp_parser *, tree); static tree cp_parser_save_member_function_body (cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree); 
static tree cp_parser_save_nsdmi (cp_parser *); static tree cp_parser_enclosed_template_argument_list (cp_parser *); static void cp_parser_save_default_args (cp_parser *, tree); static void cp_parser_late_parsing_for_member (cp_parser *, tree); static tree cp_parser_late_parse_one_default_arg (cp_parser *, tree, tree, tree); static void cp_parser_late_parsing_nsdmi (cp_parser *, tree); static void cp_parser_late_parsing_default_args (cp_parser *, tree); static tree cp_parser_sizeof_operand (cp_parser *, enum rid); static tree cp_parser_trait_expr (cp_parser *, enum rid); static bool cp_parser_declares_only_class_p (cp_parser *); static void cp_parser_set_storage_class (cp_parser *, cp_decl_specifier_seq *, enum rid, location_t); static void cp_parser_set_decl_spec_type (cp_decl_specifier_seq *, tree, location_t, bool); static bool cp_parser_friend_p (const cp_decl_specifier_seq *); static void cp_parser_required_error (cp_parser *, required_token, bool); static cp_token *cp_parser_require (cp_parser *, enum cpp_ttype, required_token); static cp_token *cp_parser_require_keyword (cp_parser *, enum rid, required_token); static bool cp_parser_token_starts_function_definition_p (cp_token *); static bool cp_parser_next_token_starts_class_definition_p (cp_parser *); static bool cp_parser_next_token_ends_template_argument_p (cp_parser *); static bool cp_parser_nth_token_starts_template_argument_list_p (cp_parser *, size_t); static enum tag_types cp_parser_token_is_class_key (cp_token *); static void cp_parser_check_class_key (enum tag_types, tree type); static void cp_parser_check_access_in_redeclaration (tree type, location_t location); static bool cp_parser_optional_template_keyword (cp_parser *); static void cp_parser_pre_parsed_nested_name_specifier (cp_parser *); static bool cp_parser_cache_group (cp_parser *, enum cpp_ttype, unsigned); static tree cp_parser_cache_defarg (cp_parser *parser, bool nsdmi); static void cp_parser_parse_tentatively (cp_parser *); static 
void cp_parser_commit_to_tentative_parse (cp_parser *); static void cp_parser_abort_tentative_parse (cp_parser *); static bool cp_parser_parse_definitely (cp_parser *); static inline bool cp_parser_parsing_tentatively (cp_parser *); static bool cp_parser_uncommitted_to_tentative_parse_p (cp_parser *); static void cp_parser_error (cp_parser *, const char *); static void cp_parser_name_lookup_error (cp_parser *, tree, tree, name_lookup_error, location_t); static bool cp_parser_simulate_error (cp_parser *); static bool cp_parser_check_type_definition (cp_parser *); static void cp_parser_check_for_definition_in_return_type (cp_declarator *, tree, location_t type_location); static void cp_parser_check_for_invalid_template_id (cp_parser *, tree, location_t location); static bool cp_parser_non_integral_constant_expression (cp_parser *, non_integral_constant); static void cp_parser_diagnose_invalid_type_name (cp_parser *, tree, tree, location_t); static bool cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *); static int cp_parser_skip_to_closing_parenthesis (cp_parser *, bool, bool, bool); static void cp_parser_skip_to_end_of_statement (cp_parser *); static void cp_parser_consume_semicolon_at_end_of_statement (cp_parser *); static void cp_parser_skip_to_end_of_block_or_statement (cp_parser *); static bool cp_parser_skip_to_closing_brace (cp_parser *); static void cp_parser_skip_to_end_of_template_parameter_list (cp_parser *); static void cp_parser_skip_to_pragma_eol (cp_parser*, cp_token *); static bool cp_parser_error_occurred (cp_parser *); static bool cp_parser_allow_gnu_extensions_p (cp_parser *); static bool cp_parser_is_pure_string_literal (cp_token *); static bool cp_parser_is_string_literal (cp_token *); static bool cp_parser_is_keyword (cp_token *, enum rid); static tree cp_parser_make_typename_type (cp_parser *, tree, tree, location_t location); static cp_declarator * cp_parser_make_indirect_declarator (enum tree_code, tree, cp_cv_quals, cp_declarator 
*); /* Returns nonzero if we are parsing tentatively. */ static inline bool cp_parser_parsing_tentatively (cp_parser* parser) { return parser->context->next != NULL; } /* Returns nonzero if TOKEN is a string literal. */ static bool cp_parser_is_pure_string_literal (cp_token* token) { return (token->type == CPP_STRING || token->type == CPP_STRING16 || token->type == CPP_STRING32 || token->type == CPP_WSTRING || token->type == CPP_UTF8STRING); } /* Returns nonzero if TOKEN is a string literal of a user-defined string literal. */ static bool cp_parser_is_string_literal (cp_token* token) { return (cp_parser_is_pure_string_literal (token) || token->type == CPP_STRING_USERDEF || token->type == CPP_STRING16_USERDEF || token->type == CPP_STRING32_USERDEF || token->type == CPP_WSTRING_USERDEF || token->type == CPP_UTF8STRING_USERDEF); } /* Returns nonzero if TOKEN is the indicated KEYWORD. */ static bool cp_parser_is_keyword (cp_token* token, enum rid keyword) { return token->keyword == keyword; } /* If not parsing tentatively, issue a diagnostic of the form FILE:LINE: MESSAGE before TOKEN where TOKEN is the next token in the input stream. MESSAGE (specified by the caller) is usually of the form "expected OTHER-TOKEN". */ static void cp_parser_error (cp_parser* parser, const char* gmsgid) { if (!cp_parser_simulate_error (parser)) { cp_token *token = cp_lexer_peek_token (parser->lexer); /* This diagnostic makes more sense if it is tagged to the line of the token we just peeked at. */ cp_lexer_set_source_position_from_token (token); if (token->type == CPP_PRAGMA) { error_at (token->location, "%<#pragma%> is not allowed here"); cp_parser_skip_to_pragma_eol (parser, token); return; } c_parse_error (gmsgid, /* Because c_parser_error does not understand CPP_KEYWORD, keywords are treated like identifiers. */ (token->type == CPP_KEYWORD ? CPP_NAME : token->type), token->u.value, token->flags); } } /* Issue an error about name-lookup failing. 
   NAME is the IDENTIFIER_NODE.  DECL is the result of
   the lookup (as returned from cp_parser_lookup_name).  DESIRED is
   the thing that we hoped to find.  */

static void
cp_parser_name_lookup_error (cp_parser* parser,
			     tree name,
			     tree decl,
			     name_lookup_error desired,
			     location_t location)
{
  /* If name lookup completely failed, tell the user that NAME was not
     declared.  */
  if (decl == error_mark_node)
    {
      if (parser->scope && parser->scope != global_namespace)
	error_at (location, "%<%E::%E%> has not been declared",
		  parser->scope, name);
      else if (parser->scope == global_namespace)
	error_at (location, "%<::%E%> has not been declared", name);
      else if (parser->object_scope
	       && !CLASS_TYPE_P (parser->object_scope))
	error_at (location, "request for member %qE in non-class type %qT",
		  name, parser->object_scope);
      else if (parser->object_scope)
	error_at (location, "%<%T::%E%> has not been declared",
		  parser->object_scope, name);
      else
	error_at (location, "%qE has not been declared", name);
    }
  /* Otherwise NAME was found, but is not the DESIRED kind of entity;
     the exact message depends on the scope the lookup used.  */
  else if (parser->scope && parser->scope != global_namespace)
    {
      switch (desired)
	{
	case NLE_TYPE:
	  error_at (location, "%<%E::%E%> is not a type",
		    parser->scope, name);
	  break;
	case NLE_CXX98:
	  error_at (location, "%<%E::%E%> is not a class or namespace",
		    parser->scope, name);
	  break;
	case NLE_NOT_CXX98:
	  error_at (location,
		    "%<%E::%E%> is not a class, namespace, or enumeration",
		    parser->scope, name);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else if (parser->scope == global_namespace)
    {
      switch (desired)
	{
	case NLE_TYPE:
	  error_at (location, "%<::%E%> is not a type", name);
	  break;
	case NLE_CXX98:
	  error_at (location, "%<::%E%> is not a class or namespace", name);
	  break;
	case NLE_NOT_CXX98:
	  error_at (location,
		    "%<::%E%> is not a class, namespace, or enumeration",
		    name);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (desired)
	{
	case NLE_TYPE:
	  error_at (location, "%qE is not a type", name);
	  break;
	case NLE_CXX98:
	  error_at (location, "%qE is not a class or namespace", name);
	  break;
	case NLE_NOT_CXX98:
	  error_at (location,
		    "%qE is not a class, namespace, or enumeration", name);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}

/* If we are parsing tentatively, remember that an error has occurred
   during this tentative parse.  Returns true if the error was
   simulated; false if a message should be issued by the caller.  */

static bool
cp_parser_simulate_error (cp_parser* parser)
{
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      parser->context->status = CP_PARSER_STATUS_KIND_ERROR;
      return true;
    }
  return false;
}

/* Check for repeated decl-specifiers.  LOCATION is used for any
   diagnostic issued.  */

static void
cp_parser_check_decl_spec (cp_decl_specifier_seq *decl_specs,
			   location_t location)
{
  int ds;

  for (ds = ds_first; ds != ds_last; ++ds)
    {
      unsigned count = decl_specs->specs[ds];
      if (count < 2)
	continue;
      /* The "long" specifier is a special case because of "long long".  */
      if (ds == ds_long)
	{
	  if (count > 2)
	    error_at (location, "%<long long long%> is too long for GCC");
	  else
	    pedwarn_cxx98 (location, OPT_Wlong_long,
			   "ISO C++ 1998 does not support %<long long%>");
	}
      else if (count > 1)
	{
	  /* Indexed by the ds_* enumerators; order must match.  */
	  static const char *const decl_spec_names[] = {
	    "signed",
	    "unsigned",
	    "short",
	    "long",
	    "const",
	    "volatile",
	    "restrict",
	    "inline",
	    "virtual",
	    "explicit",
	    "friend",
	    "typedef",
	    "using",
	    "constexpr",
	    "__complex",
	    "__thread"
	  };
	  error_at (location, "duplicate %qs", decl_spec_names[ds]);
	}
    }
}

/* This function is called when a type is defined.  If type
   definitions are forbidden at this point, an error message is
   issued.  */

static bool
cp_parser_check_type_definition (cp_parser* parser)
{
  /* If types are forbidden here, issue a message.  */
  if (parser->type_definition_forbidden_message)
    {
      /* Don't use `%s' to print the string, because quotations (`%<', `%>')
	 in the message need to be interpreted.  */
      error (parser->type_definition_forbidden_message);
      return false;
    }
  return true;
}

/* This function is called when the DECLARATOR is processed.  The TYPE
   was a type defined in the decl-specifiers.
If it is invalid to define a type in the decl-specifiers for DECLARATOR, an error is issued. TYPE_LOCATION is the location of TYPE and is used for error reporting. */ static void cp_parser_check_for_definition_in_return_type (cp_declarator *declarator, tree type, location_t type_location) { /* [dcl.fct] forbids type definitions in return types. Unfortunately, it's not easy to know whether or not we are processing a return type until after the fact. */ while (declarator && (declarator->kind == cdk_pointer || declarator->kind == cdk_reference || declarator->kind == cdk_ptrmem)) declarator = declarator->declarator; if (declarator && declarator->kind == cdk_function) { error_at (type_location, "new types may not be defined in a return type"); inform (type_location, "(perhaps a semicolon is missing after the definition of %qT)", type); } } /* A type-specifier (TYPE) has been parsed which cannot be followed by "<" in any valid C++ program. If the next token is indeed "<", issue a message warning the user about what appears to be an invalid attempt to form a template-id. LOCATION is the location of the type-specifier (TYPE) */ static void cp_parser_check_for_invalid_template_id (cp_parser* parser, tree type, location_t location) { cp_token_position start = 0; if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) { if (TYPE_P (type)) error_at (location, "%qT is not a template", type); else if (TREE_CODE (type) == IDENTIFIER_NODE) error_at (location, "%qE is not a template", type); else error_at (location, "invalid template-id"); /* Remember the location of the invalid "<". */ if (cp_parser_uncommitted_to_tentative_parse_p (parser)) start = cp_lexer_token_position (parser->lexer, true); /* Consume the "<". */ cp_lexer_consume_token (parser->lexer); /* Parse the template arguments. */ cp_parser_enclosed_template_argument_list (parser); /* Permanently remove the invalid template arguments so that this error message is not issued again. 
*/ if (start) cp_lexer_purge_tokens_after (parser->lexer, start); } } /* If parsing an integral constant-expression, issue an error message about the fact that THING appeared and return true. Otherwise, return false. In either case, set PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P. */ static bool cp_parser_non_integral_constant_expression (cp_parser *parser, non_integral_constant thing) { parser->non_integral_constant_expression_p = true; if (parser->integral_constant_expression_p) { if (!parser->allow_non_integral_constant_expression_p) { const char *msg = NULL; switch (thing) { case NIC_FLOAT: error ("floating-point literal " "cannot appear in a constant-expression"); return true; case NIC_CAST: error ("a cast to a type other than an integral or " "enumeration type cannot appear in a " "constant-expression"); return true; case NIC_TYPEID: error ("%<typeid%> operator " "cannot appear in a constant-expression"); return true; case NIC_NCC: error ("non-constant compound literals " "cannot appear in a constant-expression"); return true; case NIC_FUNC_CALL: error ("a function call " "cannot appear in a constant-expression"); return true; case NIC_INC: error ("an increment " "cannot appear in a constant-expression"); return true; case NIC_DEC: error ("an decrement " "cannot appear in a constant-expression"); return true; case NIC_ARRAY_REF: error ("an array reference " "cannot appear in a constant-expression"); return true; case NIC_ADDR_LABEL: error ("the address of a label " "cannot appear in a constant-expression"); return true; case NIC_OVERLOADED: error ("calls to overloaded operators " "cannot appear in a constant-expression"); return true; case NIC_ASSIGNMENT: error ("an assignment cannot appear in a constant-expression"); return true; case NIC_COMMA: error ("a comma operator " "cannot appear in a constant-expression"); return true; case NIC_CONSTRUCTOR: error ("a call to a constructor " "cannot appear in a constant-expression"); return true; case NIC_TRANSACTION: 
error ("a transaction expression " "cannot appear in a constant-expression"); return true; case NIC_THIS: msg = "this"; break; case NIC_FUNC_NAME: msg = "__FUNCTION__"; break; case NIC_PRETTY_FUNC: msg = "__PRETTY_FUNCTION__"; break; case NIC_C99_FUNC: msg = "__func__"; break; case NIC_VA_ARG: msg = "va_arg"; break; case NIC_ARROW: msg = "->"; break; case NIC_POINT: msg = "."; break; case NIC_STAR: msg = "*"; break; case NIC_ADDR: msg = "&"; break; case NIC_PREINCREMENT: msg = "++"; break; case NIC_PREDECREMENT: msg = "--"; break; case NIC_NEW: msg = "new"; break; case NIC_DEL: msg = "delete"; break; default: gcc_unreachable (); } if (msg) error ("%qs cannot appear in a constant-expression", msg); return true; } } return false; } /* Emit a diagnostic for an invalid type name. SCOPE is the qualifying scope (or NULL, if none) for ID. This function commits to the current active tentative parse, if any. (Otherwise, the problematic construct might be encountered again later, resulting in duplicate error messages.) LOCATION is the location of ID. */ static void cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree scope, tree id, location_t location) { tree decl, old_scope; cp_parser_commit_to_tentative_parse (parser); /* Try to lookup the identifier. */ old_scope = parser->scope; parser->scope = scope; decl = cp_parser_lookup_name_simple (parser, id, location); parser->scope = old_scope; /* If the lookup found a template-name, it means that the user forgot to specify an argument list. Emit a useful error message. */ if (TREE_CODE (decl) == TEMPLATE_DECL) error_at (location, "invalid use of template-name %qE without an argument list", decl); else if (TREE_CODE (id) == BIT_NOT_EXPR) error_at (location, "invalid use of destructor %qD as a type", id); else if (TREE_CODE (decl) == TYPE_DECL) /* Something like 'unsigned A a;' */ error_at (location, "invalid combination of multiple type-specifiers"); else if (!parser->scope) { /* Issue an error message. 
   */
      error_at (location, "%qE does not name a type", id);
      /* If we're in a template class, it's possible that the user was
	 referring to a type from a base class.  For example:

	   template <typename T> struct A { typedef T X; };
	   template <typename T> struct B : public A<T> { X x; };

	 The user should have said "typename A<T>::X".  */
      if (cxx_dialect < cxx0x && id == ridpointers[(int)RID_CONSTEXPR])
	/* Special-case `constexpr' used in pre-C++11 mode: point the
	   user at the dialect option instead of the base-class hint.  */
	inform (location, "C++11 %<constexpr%> only available with "
		"-std=c++11 or -std=gnu++11");
      else if (processing_template_decl
	       && current_class_type
	       && TYPE_BINFO (current_class_type))
	{
	  tree b;

	  /* Walk the base binfos looking for a dependent base that
	     declares a type named ID.  */
	  for (b = TREE_CHAIN (TYPE_BINFO (current_class_type));
	       b;
	       b = TREE_CHAIN (b))
	    {
	      tree base_type = BINFO_TYPE (b);
	      if (CLASS_TYPE_P (base_type)
		  && dependent_type_p (base_type))
		{
		  tree field;
		  /* Go from a particular instantiation of the
		     template (which will have an empty TYPE_FIELDs),
		     to the main version.  */
		  base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type);
		  for (field = TYPE_FIELDS (base_type);
		       field;
		       field = DECL_CHAIN (field))
		    if (TREE_CODE (field) == TYPE_DECL
			&& DECL_NAME (field) == id)
		      {
			inform (location,
				"(perhaps %<typename %T::%E%> was intended)",
				BINFO_TYPE (b), id);
			break;
		      }
		  /* FIELD is non-null only if the inner loop broke out
		     on a match; stop searching further bases then.  */
		  if (field)
		    break;
		}
	    }
	}
    }
  /* Here we diagnose qualified-ids where the scope is actually correct,
     but the identifier does not resolve to a valid type name.  */
  else if (parser->scope != error_mark_node)
    {
      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
	error_at (location, "%qE in namespace %qE does not name a type",
		  id, parser->scope);
      else if (CLASS_TYPE_P (parser->scope)
	       && constructor_name_p (id, parser->scope))
	{
	  /* A<T>::A<T>() */
	  error_at (location, "%<%T::%E%> names the constructor, not"
		    " the type", parser->scope, id);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
	    error_at (location, "and %qT has no template constructors",
		      parser->scope);
	}
      else if (TYPE_P (parser->scope)
	       && dependent_scope_p (parser->scope))
	error_at (location, "need %<typename%> before %<%T::%E%> because "
		  "%qT is a dependent scope",
		  parser->scope, id, parser->scope);
      else if (TYPE_P (parser->scope))
	error_at (location, "%qE in %q#T does not name a type",
		  id, parser->scope);
      else
	gcc_unreachable ();
    }
}

/* Check for a common situation where a type-name should be present,
   but is not, and issue a sensible error message.  Returns true if an
   invalid type-name was detected.

   The situation handled by this function are variable declarations of the
   form `ID a', where `ID' is an id-expression and `a' is a plain identifier.
   Usually, `ID' should name a type, but if we got here it means that it
   does not. We try to emit the best possible error message depending on
   how exactly the id-expression looks like.  */

static bool
cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser)
{
  tree id;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Avoid duplicate error about ambiguous lookup.  */
  if (token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_token *next = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (next->type == CPP_NAME && next->ambiguous_p)
	goto out;
    }
  cp_parser_parse_tentatively (parser);
  id = cp_parser_id_expression (parser,
				/*template_keyword_p=*/false,
				/*check_dependency_p=*/true,
				/*template_p=*/NULL,
				/*declarator_p=*/true,
				/*optional_p=*/false);
  /* If the next token is a (, this is a function with no explicit return
     type, i.e.
constructor, destructor or conversion op. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN) || TREE_CODE (id) == TYPE_DECL) { cp_parser_abort_tentative_parse (parser); return false; } if (!cp_parser_parse_definitely (parser)) return false; /* Emit a diagnostic for the invalid type. */ cp_parser_diagnose_invalid_type_name (parser, parser->scope, id, token->location); out: /* If we aren't in the middle of a declarator (i.e. in a parameter-declaration-clause), skip to the end of the declaration; there's no point in trying to process it. */ if (!parser->in_declarator_p) cp_parser_skip_to_end_of_block_or_statement (parser); return true; } /* Consume tokens up to, and including, the next non-nested closing `)'. Returns 1 iff we found a closing `)'. RECOVERING is true, if we are doing error recovery. Returns -1 if OR_COMMA is true and we found an unnested comma. */ static int cp_parser_skip_to_closing_parenthesis (cp_parser *parser, bool recovering, bool or_comma, bool consume_paren) { unsigned paren_depth = 0; unsigned brace_depth = 0; unsigned square_depth = 0; if (recovering && !or_comma && cp_parser_uncommitted_to_tentative_parse_p (parser)) return 0; while (true) { cp_token * token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, then there is no closing `)'. */ return 0; /* This is good for lambda expression capture-lists. */ case CPP_OPEN_SQUARE: ++square_depth; break; case CPP_CLOSE_SQUARE: if (!square_depth--) return 0; break; case CPP_SEMICOLON: /* This matches the processing in skip_to_end_of_statement. 
*/ if (!brace_depth) return 0; break; case CPP_OPEN_BRACE: ++brace_depth; break; case CPP_CLOSE_BRACE: if (!brace_depth--) return 0; break; case CPP_COMMA: if (recovering && or_comma && !brace_depth && !paren_depth && !square_depth) return -1; break; case CPP_OPEN_PAREN: if (!brace_depth) ++paren_depth; break; case CPP_CLOSE_PAREN: if (!brace_depth && !paren_depth--) { if (consume_paren) cp_lexer_consume_token (parser->lexer); return 1; } break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* Consume tokens until we reach the end of the current statement. Normally, that will be just before consuming a `;'. However, if a non-nested `}' comes first, then we stop before consuming that. */ static void cp_parser_skip_to_end_of_statement (cp_parser* parser) { unsigned nesting_depth = 0; while (true) { cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, stop. */ return; case CPP_SEMICOLON: /* If the next token is a `;', we have reached the end of the statement. */ if (!nesting_depth) return; break; case CPP_CLOSE_BRACE: /* If this is a non-nested '}', stop before consuming it. That way, when confronted with something like: { 3 + } we stop before consuming the closing '}', even though we have not yet reached a `;'. */ if (nesting_depth == 0) return; /* If it is the closing '}' for a block that we have scanned, stop -- but only after consuming the token. That way given: void f g () { ... } typedef int I; we will stop after the body of the erroneously declared function, but before consuming the following `typedef' declaration. */ if (--nesting_depth == 0) { cp_lexer_consume_token (parser->lexer); return; } case CPP_OPEN_BRACE: ++nesting_depth; break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* This function is called at the end of a statement or declaration. 
   If the next token is a
   semicolon, it is consumed; otherwise, error recovery is
   attempted.  */

static void
cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser)
{
  /* Look for the trailing `;'.  */
  if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
    {
      /* If there is additional (erroneous) input, skip to the end of
	 the statement.  */
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
    }
}

/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested `;'.  */

static void
cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser)
{
  /* NESTING_DEPTH doubles as a "stop after consuming this token" flag:
     a value of -1 makes the loop condition fail after the final token
     is consumed.  */
  int nesting_depth = 0;

  while (nesting_depth >= 0)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  /* If we've run out of tokens, stop.  */
	  return;

	case CPP_SEMICOLON:
	  /* Stop if this is an unnested ';'. */
	  if (!nesting_depth)
	    nesting_depth = -1;
	  break;

	case CPP_CLOSE_BRACE:
	  /* Stop if this is an unnested '}', or closes the outermost
	     nesting level.  */
	  nesting_depth--;
	  if (nesting_depth < 0)
	    return;
	  if (!nesting_depth)
	    nesting_depth = -1;
	  break;

	case CPP_OPEN_BRACE:
	  /* Nest. */
	  nesting_depth++;
	  break;

	default:
	  break;
	}

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* Skip tokens until a non-nested closing curly brace is the next
   token, or there are no more tokens. Return true in the first case,
   false otherwise.  */

static bool
cp_parser_skip_to_closing_brace (cp_parser *parser)
{
  unsigned nesting_depth = 0;

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  /* If we've run out of tokens, stop.  */
	  return false;

	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested `}', then we have reached
	     the end of the current block.  */
	  if (nesting_depth-- == 0)
	    return true;
	  break;

	case CPP_OPEN_BRACE:
	  /* If it the next token is a `{', then we are entering a new
	     block.  Consume the entire block.  */
	  ++nesting_depth;
	  break;

	default:
	  break;
	}

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* Consume tokens until we reach the end of the pragma.  The PRAGMA_TOK
   parameter is the PRAGMA token, allowing us to purge the entire pragma
   sequence.  */

static void
cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok)
{
  cp_token *token;

  parser->lexer->in_pragma = false;

  do
    token = cp_lexer_consume_token (parser->lexer);
  while (token->type != CPP_PRAGMA_EOL && token->type != CPP_EOF);

  /* Ensure that the pragma is not parsed again.  */
  cp_lexer_purge_tokens_after (parser->lexer, pragma_tok);
}

/* Require pragma end of line, resyncing with it as necessary.  The
   arguments are as for cp_parser_skip_to_pragma_eol.  */

static void
cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;
  if (!cp_parser_require (parser, CPP_PRAGMA_EOL, RT_PRAGMA_EOL))
    cp_parser_skip_to_pragma_eol (parser, pragma_tok);
}

/* This is a simple wrapper around make_typename_type. When the id is
   an unresolved identifier node, we can provide a superior diagnostic
   using cp_parser_diagnose_invalid_type_name.  */

static tree
cp_parser_make_typename_type (cp_parser *parser, tree scope,
			      tree id, location_t id_location)
{
  tree result;
  if (TREE_CODE (id) == IDENTIFIER_NODE)
    {
      /* Try quietly first (tf_none); on failure emit the richer
	 diagnostic instead of make_typename_type's own.  */
      result = make_typename_type (scope, id, typename_type,
				   /*complain=*/tf_none);
      if (result == error_mark_node)
	cp_parser_diagnose_invalid_type_name (parser, scope, id, id_location);
      return result;
    }
  return make_typename_type (scope, id, typename_type, tf_error);
}

/* This is a wrapper around the
   make_{pointer,ptrmem,reference}_declarator functions that decides
   which one to call based on the CODE and CLASS_TYPE arguments.
The CODE argument should be one of the values returned by cp_parser_ptr_operator. */ static cp_declarator * cp_parser_make_indirect_declarator (enum tree_code code, tree class_type, cp_cv_quals cv_qualifiers, cp_declarator *target) { if (code == ERROR_MARK) return cp_error_declarator; if (code == INDIRECT_REF) if (class_type == NULL_TREE) return make_pointer_declarator (cv_qualifiers, target); else return make_ptrmem_declarator (cv_qualifiers, class_type, target); else if (code == ADDR_EXPR && class_type == NULL_TREE) return make_reference_declarator (cv_qualifiers, target, false); else if (code == NON_LVALUE_EXPR && class_type == NULL_TREE) return make_reference_declarator (cv_qualifiers, target, true); gcc_unreachable (); } /* Create a new C++ parser. */ static cp_parser * cp_parser_new (void) { cp_parser *parser; cp_lexer *lexer; unsigned i; /* cp_lexer_new_main is called before doing GC allocation because cp_lexer_new_main might load a PCH file. */ lexer = cp_lexer_new_main (); /* Initialize the binops_by_token so that we can get the tree directly from the token. */ for (i = 0; i < sizeof (binops) / sizeof (binops[0]); i++) binops_by_token[binops[i].token_type] = binops[i]; parser = ggc_alloc_cleared_cp_parser (); parser->lexer = lexer; parser->context = cp_parser_context_new (NULL); /* For now, we always accept GNU extensions. */ parser->allow_gnu_extensions_p = 1; /* The `>' token is a greater-than operator, not the end of a template-id. */ parser->greater_than_is_operator_p = true; parser->default_arg_ok_p = true; /* We are not parsing a constant-expression. */ parser->integral_constant_expression_p = false; parser->allow_non_integral_constant_expression_p = false; parser->non_integral_constant_expression_p = false; /* Local variable names are not forbidden. */ parser->local_variables_forbidden_p = false; /* We are not processing an `extern "C"' declaration. */ parser->in_unbraced_linkage_specification_p = false; /* We are not processing a declarator. 
*/ parser->in_declarator_p = false; /* We are not processing a template-argument-list. */ parser->in_template_argument_list_p = false; /* We are not in an iteration statement. */ parser->in_statement = 0; /* We are not in a switch statement. */ parser->in_switch_statement_p = false; /* We are not parsing a type-id inside an expression. */ parser->in_type_id_in_expr_p = false; /* Declarations aren't implicitly extern "C". */ parser->implicit_extern_c = false; /* String literals should be translated to the execution character set. */ parser->translate_strings_p = true; /* We are not parsing a function body. */ parser->in_function_body = false; /* We can correct until told otherwise. */ parser->colon_corrects_to_scope_p = true; /* The unparsed function queue is empty. */ push_unparsed_function_queues (parser); /* There are no classes being defined. */ parser->num_classes_being_defined = 0; /* No template parameters apply. */ parser->num_template_parameter_lists = 0; return parser; } /* Create a cp_lexer structure which will emit the tokens in CACHE and push it onto the parser's lexer stack. This is used for delayed parsing of in-class method bodies and default arguments, and should not be confused with tentative parsing. */ static void cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache) { cp_lexer *lexer = cp_lexer_new_from_tokens (cache); lexer->next = parser->lexer; parser->lexer = lexer; /* Move the current source position to that of the first token in the new lexer. */ cp_lexer_set_source_position_from_token (lexer->next_token); } /* Pop the top lexer off the parser stack. This is never used for the "main" lexer, only for those pushed by cp_parser_push_lexer_for_tokens. */ static void cp_parser_pop_lexer (cp_parser *parser) { cp_lexer *lexer = parser->lexer; parser->lexer = lexer->next; cp_lexer_destroy (lexer); /* Put the current source position back where it was before this lexer was pushed. 
*/ cp_lexer_set_source_position_from_token (parser->lexer->next_token); } /* Lexical conventions [gram.lex] */ /* Parse an identifier. Returns an IDENTIFIER_NODE representing the identifier. */ static tree cp_parser_identifier (cp_parser* parser) { cp_token *token; /* Look for the identifier. */ token = cp_parser_require (parser, CPP_NAME, RT_NAME); /* Return the value. */ return token ? token->u.value : error_mark_node; } /* Parse a sequence of adjacent string constants. Returns a TREE_STRING representing the combined, nul-terminated string constant. If TRANSLATE is true, translate the string to the execution character set. If WIDE_OK is true, a wide string is invalid here. C++98 [lex.string] says that if a narrow string literal token is adjacent to a wide string literal token, the behavior is undefined. However, C99 6.4.5p4 says that this results in a wide string literal. We follow C99 here, for consistency with the C front end. This code is largely lifted from lex_string() in c-lex.c. FUTURE: ObjC++ will need to handle @-strings here. */ static tree cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok) { tree value; size_t count; struct obstack str_ob; cpp_string str, istr, *strs; cp_token *tok; enum cpp_ttype type, curr_type; int have_suffix_p = 0; tree string_tree; tree suffix_id = NULL_TREE; bool curr_tok_is_userdef_p = false; tok = cp_lexer_peek_token (parser->lexer); if (!cp_parser_is_string_literal (tok)) { cp_parser_error (parser, "expected string-literal"); return error_mark_node; } if (cpp_userdef_string_p (tok->type)) { string_tree = USERDEF_LITERAL_VALUE (tok->u.value); curr_type = cpp_userdef_string_remove_type (tok->type); curr_tok_is_userdef_p = true; } else { string_tree = tok->u.value; curr_type = tok->type; } type = curr_type; /* Try to avoid the overhead of creating and destroying an obstack for the common case of just one string. 
*/ if (!cp_parser_is_string_literal (cp_lexer_peek_nth_token (parser->lexer, 2))) { cp_lexer_consume_token (parser->lexer); str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree); str.len = TREE_STRING_LENGTH (string_tree); count = 1; if (curr_tok_is_userdef_p) { suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value); have_suffix_p = 1; curr_type = cpp_userdef_string_remove_type (tok->type); } else curr_type = tok->type; strs = &str; } else { gcc_obstack_init (&str_ob); count = 0; do { cp_lexer_consume_token (parser->lexer); count++; str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree); str.len = TREE_STRING_LENGTH (string_tree); if (curr_tok_is_userdef_p) { tree curr_suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value); if (have_suffix_p == 0) { suffix_id = curr_suffix_id; have_suffix_p = 1; } else if (have_suffix_p == 1 && curr_suffix_id != suffix_id) { error ("inconsistent user-defined literal suffixes" " %qD and %qD in string literal", suffix_id, curr_suffix_id); have_suffix_p = -1; } curr_type = cpp_userdef_string_remove_type (tok->type); } else curr_type = tok->type; if (type != curr_type) { if (type == CPP_STRING) type = curr_type; else if (curr_type != CPP_STRING) error_at (tok->location, "unsupported non-standard concatenation " "of string literals"); } obstack_grow (&str_ob, &str, sizeof (cpp_string)); tok = cp_lexer_peek_token (parser->lexer); if (cpp_userdef_string_p (tok->type)) { string_tree = USERDEF_LITERAL_VALUE (tok->u.value); curr_type = cpp_userdef_string_remove_type (tok->type); curr_tok_is_userdef_p = true; } else { string_tree = tok->u.value; curr_type = tok->type; curr_tok_is_userdef_p = false; } } while (cp_parser_is_string_literal (tok)); strs = (cpp_string *) obstack_finish (&str_ob); } if (type != CPP_STRING && !wide_ok) { cp_parser_error (parser, "a wide string is invalid in this context"); type = CPP_STRING; } if ((translate ? 
cpp_interpret_string : cpp_interpret_string_notranslate) (parse_in, strs, count, &istr, type)) { value = build_string (istr.len, (const char *)istr.text); free (CONST_CAST (unsigned char *, istr.text)); switch (type) { default: case CPP_STRING: case CPP_UTF8STRING: TREE_TYPE (value) = char_array_type_node; break; case CPP_STRING16: TREE_TYPE (value) = char16_array_type_node; break; case CPP_STRING32: TREE_TYPE (value) = char32_array_type_node; break; case CPP_WSTRING: TREE_TYPE (value) = wchar_array_type_node; break; } value = fix_string_type (value); if (have_suffix_p) { tree literal = build_userdef_literal (suffix_id, value, NULL_TREE); tok->u.value = literal; return cp_parser_userdef_string_literal (tok); } } else /* cpp_interpret_string has issued an error. */ value = error_mark_node; if (count > 1) obstack_free (&str_ob, 0); return value; } /* Look up a literal operator with the name and the exact arguments. */ static tree lookup_literal_operator (tree name, VEC(tree,gc) *args) { tree decl, fns; decl = lookup_name (name); if (!decl || !is_overloaded_fn (decl)) return error_mark_node; for (fns = decl; fns; fns = OVL_NEXT (fns)) { unsigned int ix; bool found = true; tree fn = OVL_CURRENT (fns); tree argtypes = NULL_TREE; argtypes = TYPE_ARG_TYPES (TREE_TYPE (fn)); if (argtypes != NULL_TREE) { for (ix = 0; ix < VEC_length (tree, args) && argtypes != NULL_TREE; ++ix, argtypes = TREE_CHAIN (argtypes)) { tree targ = TREE_VALUE (argtypes); tree tparm = TREE_TYPE (VEC_index (tree, args, ix)); bool ptr = TREE_CODE (targ) == POINTER_TYPE; bool arr = TREE_CODE (tparm) == ARRAY_TYPE; if ((ptr || arr || !same_type_p (targ, tparm)) && (!ptr || !arr || !same_type_p (TREE_TYPE (targ), TREE_TYPE (tparm)))) found = false; } if (found && ix == VEC_length (tree, args) /* May be this should be sufficient_parms_p instead, depending on how exactly should user-defined literals work in presence of default arguments on the literal operator parameters. 
*/ && argtypes == void_list_node) return fn; } } return error_mark_node; } /* Parse a user-defined char constant. Returns a call to a user-defined literal operator taking the character as an argument. */ static tree cp_parser_userdef_char_literal (cp_parser *parser) { cp_token *token = cp_lexer_consume_token (parser->lexer); tree literal = token->u.value; tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal); tree value = USERDEF_LITERAL_VALUE (literal); tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id)); tree decl, result; /* Build up a call to the user-defined operator */ /* Lookup the name we got back from the id-expression. */ VEC(tree,gc) *args = make_tree_vector (); VEC_safe_push (tree, gc, args, value); decl = lookup_literal_operator (name, args); if (!decl || decl == error_mark_node) { error ("unable to find character literal operator %qD with %qT argument", name, TREE_TYPE (value)); release_tree_vector (args); return error_mark_node; } result = finish_call_expr (decl, &args, false, true, tf_warning_or_error); release_tree_vector (args); if (result != error_mark_node) return result; error ("unable to find character literal operator %qD with %qT argument", name, TREE_TYPE (value)); return error_mark_node; } /* A subroutine of cp_parser_userdef_numeric_literal to create a char... template parameter pack from a string node. */ static tree make_char_string_pack (tree value) { tree charvec; tree argpack = make_node (NONTYPE_ARGUMENT_PACK); const char *str = TREE_STRING_POINTER (value); int i, len = TREE_STRING_LENGTH (value) - 1; tree argvec = make_tree_vec (1); /* Fill in CHARVEC with all of the parameters. */ charvec = make_tree_vec (len); for (i = 0; i < len; ++i) TREE_VEC_ELT (charvec, i) = build_int_cst (char_type_node, str[i]); /* Build the argument packs. */ SET_ARGUMENT_PACK_ARGS (argpack, charvec); TREE_TYPE (argpack) = char_type_node; TREE_VEC_ELT (argvec, 0) = argpack; return argvec; } /* Parse a user-defined numeric constant. 
returns a call to a user-defined literal operator. */ static tree cp_parser_userdef_numeric_literal (cp_parser *parser) { cp_token *token = cp_lexer_consume_token (parser->lexer); tree literal = token->u.value; tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal); tree value = USERDEF_LITERAL_VALUE (literal); tree num_string = USERDEF_LITERAL_NUM_STRING (literal); tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id)); tree decl, result; VEC(tree,gc) *args; /* Look for a literal operator taking the exact type of numeric argument as the literal value. */ args = make_tree_vector (); VEC_safe_push (tree, gc, args, value); decl = lookup_literal_operator (name, args); if (decl && decl != error_mark_node) { result = finish_call_expr (decl, &args, false, true, tf_none); if (result != error_mark_node) { release_tree_vector (args); return result; } } release_tree_vector (args); /* If the numeric argument didn't work, look for a raw literal operator taking a const char* argument consisting of the number in string format. */ args = make_tree_vector (); VEC_safe_push (tree, gc, args, num_string); decl = lookup_literal_operator (name, args); if (decl && decl != error_mark_node) { result = finish_call_expr (decl, &args, false, true, tf_none); if (result != error_mark_node) { release_tree_vector (args); return result; } } release_tree_vector (args); /* If the raw literal didn't work, look for a non-type template function with parameter pack char.... Call the function with template parameter characters representing the number. 
*/ args = make_tree_vector (); decl = lookup_literal_operator (name, args); if (decl && decl != error_mark_node) { tree tmpl_args = make_char_string_pack (num_string); decl = lookup_template_function (decl, tmpl_args); result = finish_call_expr (decl, &args, false, true, tf_none); if (result != error_mark_node) { release_tree_vector (args); return result; } } release_tree_vector (args); error ("unable to find numeric literal operator %qD", name); return error_mark_node; } /* Parse a user-defined string constant. Returns a call to a user-defined literal operator taking a character pointer and the length of the string as arguments. */ static tree cp_parser_userdef_string_literal (cp_token *token) { tree literal = token->u.value; tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal); tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id)); tree value = USERDEF_LITERAL_VALUE (literal); int len = TREE_STRING_LENGTH (value) / TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1; tree decl, result; /* Build up a call to the user-defined operator */ /* Lookup the name we got back from the id-expression. */ VEC(tree,gc) *args = make_tree_vector (); VEC_safe_push (tree, gc, args, value); VEC_safe_push (tree, gc, args, build_int_cst (size_type_node, len)); decl = lookup_name (name); if (!decl || decl == error_mark_node) { error ("unable to find string literal operator %qD", name); release_tree_vector (args); return error_mark_node; } result = finish_call_expr (decl, &args, false, true, tf_none); release_tree_vector (args); if (result != error_mark_node) return result; error ("unable to find string literal operator %qD with %qT, %qT arguments", name, TREE_TYPE (value), size_type_node); return error_mark_node; } /* Basic concepts [gram.basic] */ /* Parse a translation-unit. translation-unit: declaration-seq [opt] Returns TRUE if all went well. 
*/ static bool cp_parser_translation_unit (cp_parser* parser) { /* The address of the first non-permanent object on the declarator obstack. */ static void *declarator_obstack_base; bool success; /* Create the declarator obstack, if necessary. */ if (!cp_error_declarator) { gcc_obstack_init (&declarator_obstack); /* Create the error declarator. */ cp_error_declarator = make_declarator (cdk_error); /* Create the empty parameter list. */ no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE); /* Remember where the base of the declarator obstack lies. */ declarator_obstack_base = obstack_next_free (&declarator_obstack); } cp_parser_declaration_seq_opt (parser); /* If there are no tokens left then all went well. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EOF)) { /* Get rid of the token array; we don't need it any more. */ cp_lexer_destroy (parser->lexer); parser->lexer = NULL; /* This file might have been a context that's implicitly extern "C". If so, pop the lang context. (Only relevant for PCH.) */ if (parser->implicit_extern_c) { pop_lang_context (); parser->implicit_extern_c = false; } /* Finish up. */ finish_translation_unit (); success = true; } else { cp_parser_error (parser, "expected declaration"); success = false; } /* Make sure the declarator obstack was fully cleaned up. */ gcc_assert (obstack_next_free (&declarator_obstack) == declarator_obstack_base); /* All went well. */ return success; } /* Expressions [gram.expr] */ /* Parse a primary-expression. 
   primary-expression:
     literal
     this
     ( expression )
     id-expression

   GNU Extensions:

   primary-expression:
     ( compound-statement )
     __builtin_va_arg ( assignment-expression , type-id )
     __builtin_offsetof ( type-id , offsetof-expression )

   C++ Extensions:
     __has_nothrow_assign ( type-id )
     __has_nothrow_constructor ( type-id )
     __has_nothrow_copy ( type-id )
     __has_trivial_assign ( type-id )
     __has_trivial_constructor ( type-id )
     __has_trivial_copy ( type-id )
     __has_trivial_destructor ( type-id )
     __has_virtual_destructor ( type-id )
     __is_abstract ( type-id )
     __is_base_of ( type-id , type-id )
     __is_class ( type-id )
     __is_convertible_to ( type-id , type-id )
     __is_empty ( type-id )
     __is_enum ( type-id )
     __is_final ( type-id )
     __is_literal_type ( type-id )
     __is_pod ( type-id )
     __is_polymorphic ( type-id )
     __is_std_layout ( type-id )
     __is_trivial ( type-id )
     __is_union ( type-id )

   Objective-C++ Extension:

   primary-expression:
     objc-expression

   literal:
     __null

   ADDRESS_P is true iff this expression was immediately preceded by
   "&" and therefore might denote a pointer-to-member.  CAST_P is true
   iff this expression is the target of a cast.  TEMPLATE_ARG_P is
   true iff this expression is a template argument.

   Returns a representation of the expression.  Upon return, *IDK
   indicates what kind of id-expression (if any) was present.  */

static tree
cp_parser_primary_expression (cp_parser *parser,
			      bool address_p,
			      bool cast_p,
			      bool template_arg_p,
			      cp_id_kind *idk)
{
  cp_token *token = NULL;

  /* Assume the primary expression is not an id-expression.  */
  *idk = CP_ID_KIND_NONE;

  /* Peek at the next token.
   */
  token = cp_lexer_peek_token (parser->lexer);
  switch (token->type)
    {
      /* literal:
	   integer-literal
	   character-literal
	   floating-literal
	   string-literal
	   boolean-literal
	   pointer-literal
	   user-defined-literal  */
    case CPP_CHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
    case CPP_WCHAR:
    case CPP_NUMBER:
      if (TREE_CODE (token->u.value) == USERDEF_LITERAL)
	return cp_parser_userdef_numeric_literal (parser);
      token = cp_lexer_consume_token (parser->lexer);
      if (TREE_CODE (token->u.value) == FIXED_CST)
	{
	  error_at (token->location,
		    "fixed-point types not supported in C++");
	  return error_mark_node;
	}
      /* Floating-point literals are only allowed in an integral
	 constant expression if they are cast to an integral or
	 enumeration type.  */
      if (TREE_CODE (token->u.value) == REAL_CST
	  && parser->integral_constant_expression_p
	  && pedantic)
	{
	  /* CAST_P will be set even in invalid code like "int(2.7 +
	     ...)".   Therefore, we have to check that the next token
	     is sure to end the cast.  */
	  if (cast_p)
	    {
	      cp_token *next_token;

	      next_token = cp_lexer_peek_token (parser->lexer);
	      if (/* The comma at the end of an
		     enumerator-definition.  */
		  next_token->type != CPP_COMMA
		  /* The curly brace at the end of an enum-specifier.  */
		  && next_token->type != CPP_CLOSE_BRACE
		  /* The end of a statement.  */
		  && next_token->type != CPP_SEMICOLON
		  /* The end of the cast-expression.  */
		  && next_token->type != CPP_CLOSE_PAREN
		  /* The end of an array bound.  */
		  && next_token->type != CPP_CLOSE_SQUARE
		  /* The closing ">" in a template-argument-list.  */
		  && (next_token->type != CPP_GREATER
		      || parser->greater_than_is_operator_p)
		  /* C++0x only: A ">>" treated like two ">" tokens,
		     in a template-argument-list.  */
		  && (next_token->type != CPP_RSHIFT
		      || (cxx_dialect == cxx98)
		      || parser->greater_than_is_operator_p))
		cast_p = false;
	    }

	  /* If we are within a cast, then the constraint that the
	     cast is to an integral or enumeration type will be
	     checked at that point.  If we are not within a cast, then
	     this code is invalid.
	  */
	  if (!cast_p)
	    cp_parser_non_integral_constant_expression (parser, NIC_FLOAT);
	}
      return token->u.value;

    case CPP_CHAR_USERDEF:
    case CPP_CHAR16_USERDEF:
    case CPP_CHAR32_USERDEF:
    case CPP_WCHAR_USERDEF:
      return cp_parser_userdef_char_literal (parser);

    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
    case CPP_STRING_USERDEF:
    case CPP_STRING16_USERDEF:
    case CPP_STRING32_USERDEF:
    case CPP_WSTRING_USERDEF:
    case CPP_UTF8STRING_USERDEF:
      /* ??? Should wide strings be allowed when parser->translate_strings_p
	 is false (i.e. in attributes)?  If not, we can kill the third
	 argument to cp_parser_string_literal.  */
      return cp_parser_string_literal (parser,
				       parser->translate_strings_p,
				       true);

    case CPP_OPEN_PAREN:
      {
	tree expr;
	bool saved_greater_than_is_operator_p;

	/* Consume the `('.  */
	cp_lexer_consume_token (parser->lexer);
	/* Within a parenthesized expression, a `>' token is always
	   the greater-than operator.  */
	saved_greater_than_is_operator_p
	  = parser->greater_than_is_operator_p;
	parser->greater_than_is_operator_p = true;
	/* If we see `( { ' then we are looking at the beginning of
	   a GNU statement-expression.  */
	if (cp_parser_allow_gnu_extensions_p (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	  {
	    /* Statement-expressions are not allowed by the standard.  */
	    pedwarn (token->location, OPT_pedantic,
		     "ISO C++ forbids braced-groups within expressions");

	    /* And they're not allowed outside of a function-body; you
	       cannot, for example, write:

		 int i = ({ int j = 3; j + 1; });

	       at class or namespace scope.  */
	    if (!parser->in_function_body
		|| parser->in_template_argument_list_p)
	      {
		error_at (token->location,
			  "statement-expressions are not allowed outside "
			  "functions nor in template-argument lists");
		cp_parser_skip_to_end_of_block_or_statement (parser);
		expr = error_mark_node;
	      }
	    else
	      {
		/* Start the statement-expression.  */
		expr = begin_stmt_expr ();
		/* Parse the compound-statement.
		*/
		cp_parser_compound_statement (parser, expr, false, false);
		/* Finish up.  */
		expr = finish_stmt_expr (expr, false);
	      }
	  }
	else
	  {
	    /* Parse the parenthesized expression.  */
	    expr = cp_parser_expression (parser, cast_p, idk);
	    /* Let the front end know that this expression was
	       enclosed in parentheses. This matters in case, for
	       example, the expression is of the form `A::B', since
	       `&A::B' might be a pointer-to-member, but `&(A::B)' is
	       not.  */
	    finish_parenthesized_expr (expr);
	    /* DR 705: Wrapping an unqualified name in parentheses
	       suppresses arg-dependent lookup.  We want to pass back
	       CP_ID_KIND_QUALIFIED for suppressing vtable lookup
	       (c++/37862), but none of the others.  */
	    if (*idk != CP_ID_KIND_QUALIFIED)
	      *idk = CP_ID_KIND_NONE;
	  }
	/* The `>' token might be the end of a template-id or
	   template-parameter-list now.  */
	parser->greater_than_is_operator_p
	  = saved_greater_than_is_operator_p;
	/* Consume the `)'.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	  cp_parser_skip_to_end_of_statement (parser);

	return expr;
      }

    case CPP_OPEN_SQUARE:
      if (c_dialect_objc ())
	/* We have an Objective-C++ message. */
	return cp_parser_objc_expression (parser);
      /* Otherwise a `[' begins a C++0x lambda-expression.  */
      {
	tree lam = cp_parser_lambda_expression (parser);

	/* Don't warn about a failed tentative parse.  */
	if (cp_parser_error_occurred (parser))
	  return error_mark_node;

	maybe_warn_cpp0x (CPP0X_LAMBDA_EXPR);
	return lam;
      }

    case CPP_OBJC_STRING:
      if (c_dialect_objc ())
	/* We have an Objective-C++ string literal. */
	return cp_parser_objc_expression (parser);
      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;

    case CPP_KEYWORD:
      switch (token->keyword)
	{
	  /* These two are the boolean literals.  */
	case RID_TRUE:
	  cp_lexer_consume_token (parser->lexer);
	  return boolean_true_node;
	case RID_FALSE:
	  cp_lexer_consume_token (parser->lexer);
	  return boolean_false_node;

	  /* The `__null' literal.  */
	case RID_NULL:
	  cp_lexer_consume_token (parser->lexer);
	  return null_node;

	  /* The `nullptr' literal.
	  */
	case RID_NULLPTR:
	  cp_lexer_consume_token (parser->lexer);
	  return nullptr_node;

	  /* Recognize the `this' keyword.  */
	case RID_THIS:
	  cp_lexer_consume_token (parser->lexer);
	  if (parser->local_variables_forbidden_p)
	    {
	      error_at (token->location,
			"%<this%> may not be used in this context");
	      return error_mark_node;
	    }
	  /* Pointers cannot appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_THIS))
	    return error_mark_node;
	  return finish_this_expr ();

	  /* The `operator' keyword can be the beginning of an
	     id-expression.  */
	case RID_OPERATOR:
	  goto id_expression;

	case RID_FUNCTION_NAME:
	case RID_PRETTY_FUNCTION_NAME:
	case RID_C99_FUNCTION_NAME:
	  {
	    non_integral_constant name;

	    /* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and
	       __func__ are the names of variables -- but they are
	       treated specially.  Therefore, they are handled here,
	       rather than relying on the generic id-expression logic
	       below.  Grammatically, these names are id-expressions.

	       Consume the token.  */
	    token = cp_lexer_consume_token (parser->lexer);

	    switch (token->keyword)
	      {
	      case RID_FUNCTION_NAME:
		name = NIC_FUNC_NAME;
		break;
	      case RID_PRETTY_FUNCTION_NAME:
		name = NIC_PRETTY_FUNC;
		break;
	      case RID_C99_FUNCTION_NAME:
		name = NIC_C99_FUNC;
		break;
	      default:
		gcc_unreachable ();
	      }

	    if (cp_parser_non_integral_constant_expression (parser, name))
	      return error_mark_node;

	    /* Look up the name.  */
	    return finish_fname (token->u.value);
	  }

	case RID_VA_ARG:
	  {
	    tree expression;
	    tree type;

	    /* The `__builtin_va_arg' construct is used to handle
	       `va_arg'.  Consume the `__builtin_va_arg' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the opening `('.  */
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	    /* Now, parse the assignment-expression.  */
	    expression = cp_parser_assignment_expression (parser,
							  /*cast_p=*/false,
							  NULL);
	    /* Look for the `,'.  */
	    cp_parser_require (parser, CPP_COMMA, RT_COMMA);
	    /* Parse the type-id.  */
	    type = cp_parser_type_id (parser);
	    /* Look for the closing `)'.
	    */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    /* Using `va_arg' in a constant-expression is not
	       allowed.  */
	    if (cp_parser_non_integral_constant_expression (parser,
							    NIC_VA_ARG))
	      return error_mark_node;
	    return build_x_va_arg (expression, type);
	  }

	case RID_OFFSETOF:
	  return cp_parser_builtin_offsetof (parser);

	case RID_HAS_NOTHROW_ASSIGN:
	case RID_HAS_NOTHROW_CONSTRUCTOR:
	case RID_HAS_NOTHROW_COPY:
	case RID_HAS_TRIVIAL_ASSIGN:
	case RID_HAS_TRIVIAL_CONSTRUCTOR:
	case RID_HAS_TRIVIAL_COPY:
	case RID_HAS_TRIVIAL_DESTRUCTOR:
	case RID_HAS_VIRTUAL_DESTRUCTOR:
	case RID_IS_ABSTRACT:
	case RID_IS_BASE_OF:
	case RID_IS_CLASS:
	case RID_IS_CONVERTIBLE_TO:
	case RID_IS_EMPTY:
	case RID_IS_ENUM:
	case RID_IS_FINAL:
	case RID_IS_LITERAL_TYPE:
	case RID_IS_POD:
	case RID_IS_POLYMORPHIC:
	case RID_IS_STD_LAYOUT:
	case RID_IS_TRIVIAL:
	case RID_IS_UNION:
	  return cp_parser_trait_expr (parser, token->keyword);

	/* Objective-C++ expressions.  */
	case RID_AT_ENCODE:
	case RID_AT_PROTOCOL:
	case RID_AT_SELECTOR:
	  return cp_parser_objc_expression (parser);

	case RID_TEMPLATE:
	  if (parser->in_function_body
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_LESS))
	    {
	      error_at (token->location,
			"a template declaration cannot appear at block scope");
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      return error_mark_node;
	    }
	  /* Fall through to the generic error.  */
	default:
	  cp_parser_error (parser, "expected primary-expression");
	  return error_mark_node;
	}

      /* An id-expression can start with either an identifier, a
	 `::' as the beginning of a qualified-id, or the "operator"
	 keyword.  */
    case CPP_NAME:
    case CPP_SCOPE:
    case CPP_TEMPLATE_ID:
    case CPP_NESTED_NAME_SPECIFIER:
      {
	tree id_expression;
	tree decl;
	const char *error_msg;
	bool template_p;
	bool done;
	cp_token *id_expr_token;

      id_expression:
	/* Parse the id-expression.
	*/
	id_expression
	  = cp_parser_id_expression (parser,
				     /*template_keyword_p=*/false,
				     /*check_dependency_p=*/true,
				     &template_p,
				     /*declarator_p=*/false,
				     /*optional_p=*/false);
	if (id_expression == error_mark_node)
	  return error_mark_node;
	/* Remember the first token of the id-expression; TOKEN is
	   repointed at the token that follows it.  */
	id_expr_token = token;
	token = cp_lexer_peek_token (parser->lexer);
	/* DONE is true when the id-expression is not followed by a
	   token that could extend it into a postfix-expression.  */
	done = (token->type != CPP_OPEN_SQUARE
		&& token->type != CPP_OPEN_PAREN
		&& token->type != CPP_DOT
		&& token->type != CPP_DEREF
		&& token->type != CPP_PLUS_PLUS
		&& token->type != CPP_MINUS_MINUS);
	/* If we have a template-id, then no further lookup is
	   required.  If the template-id was for a template-class, we
	   will sometimes have a TYPE_DECL at this point.  */
	if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR
	    || TREE_CODE (id_expression) == TYPE_DECL)
	  decl = id_expression;
	/* Look up the name.  */
	else
	  {
	    tree ambiguous_decls;

	    /* If we already know that this lookup is ambiguous, then
	       we've already issued an error message; there's no reason
	       to check again.  */
	    if (id_expr_token->type == CPP_NAME
		&& id_expr_token->ambiguous_p)
	      {
		cp_parser_simulate_error (parser);
		return error_mark_node;
	      }

	    decl = cp_parser_lookup_name (parser, id_expression,
					  none_type,
					  template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  &ambiguous_decls,
					  id_expr_token->location);
	    /* If the lookup was ambiguous, an error will already have
	       been issued.  */
	    if (ambiguous_decls)
	      return error_mark_node;

	    /* In Objective-C++, we may have an Objective-C 2.0
	       dot-syntax for classes here.  */
	    if (c_dialect_objc ()
		&& cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
		&& TREE_CODE (decl) == TYPE_DECL
		&& objc_is_class_name (decl))
	      {
		tree component;
		cp_lexer_consume_token (parser->lexer);
		component = cp_parser_identifier (parser);
		if (component == error_mark_node)
		  return error_mark_node;

		return objc_build_class_component_ref (id_expression,
						       component);
	      }

	    /* In Objective-C++, an instance variable (ivar) may be preferred
	       to whatever cp_parser_lookup_name() found.
	    */
	    decl = objc_lookup_ivar (decl, id_expression);

	    /* If name lookup gives us a SCOPE_REF, then the
	       qualifying scope was dependent.  */
	    if (TREE_CODE (decl) == SCOPE_REF)
	      {
		/* At this point, we do not know if DECL is a valid
		   integral constant expression.  We assume that it is
		   in fact such an expression, so that code like:

		      template <int N> struct A {
			int a[B<N>::i];
		      };

		   is accepted.  At template-instantiation time, we
		   will check that B<N>::i is actually a constant.  */
		return decl;
	      }
	    /* Check to see if DECL is a local variable in a context
	       where that is forbidden.  */
	    if (parser->local_variables_forbidden_p
		&& local_variable_p (decl))
	      {
		/* It might be that we only found DECL because we are
		   trying to be generous with pre-ISO scoping rules.
		   For example, consider:

		     int i;
		     void g() {
		       for (int i = 0; i < 10; ++i) {}
		       extern void f(int j = i);
		     }

		   Here, name look up will originally find the out
		   of scope `i'.  We need to issue a warning message,
		   but then use the global `i'.  */
		decl = check_for_out_of_scope_variable (decl);
		if (local_variable_p (decl))
		  {
		    error_at (id_expr_token->location,
			      "local variable %qD may not appear in this context",
			      decl);
		    return error_mark_node;
		  }
	      }
	  }

	decl = (finish_id_expression
		(id_expression, decl, parser->scope,
		 idk,
		 parser->integral_constant_expression_p,
		 parser->allow_non_integral_constant_expression_p,
		 &parser->non_integral_constant_expression_p,
		 template_p, done, address_p,
		 template_arg_p,
		 &error_msg,
		 id_expr_token->location));
	if (error_msg)
	  cp_parser_error (parser, error_msg);
	return decl;
      }

      /* Anything else is an error.  */
    default:
      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;
    }
}

/* Parse an id-expression.

   id-expression:
     unqualified-id
     qualified-id

   qualified-id:
     :: [opt] nested-name-specifier template [opt] unqualified-id
     :: identifier
     :: operator-function-id
     :: template-id

   Return a representation of the unqualified portion of the
   identifier.
   Sets PARSER->SCOPE to the qualifying scope if there is a
   `::' or nested-name-specifier.

   Often, if the id-expression was a qualified-id, the caller will
   want to make a SCOPE_REF to represent the qualified-id.  This
   function does not do this in order to avoid wastefully creating
   SCOPE_REFs when they are not required.

   If TEMPLATE_KEYWORD_P is true, then we have just seen the
   `template' keyword.

   If CHECK_DEPENDENCY_P is false, then names are looked up inside
   uninstantiated templates.

   If *TEMPLATE_P is non-NULL, it is set to true iff the
   `template' keyword is used to explicitly indicate that the entity
   named is a template.

   If DECLARATOR_P is true, the id-expression is appearing as part of
   a declarator, rather than as part of an expression.  */

static tree
cp_parser_id_expression (cp_parser *parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool *template_p,
			 bool declarator_p,
			 bool optional_p)
{
  bool global_scope_p;
  bool nested_name_specifier_p;

  /* Assume the `template' keyword was not used.  */
  if (template_p)
    *template_p = template_keyword_p;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    check_dependency_p,
					    /*type_p=*/false,
					    declarator_p)
       != NULL_TREE);
  /* If there is a nested-name-specifier, then we are looking at
     the first qualified-id production.  */
  if (nested_name_specifier_p)
    {
      tree saved_scope;
      tree saved_object_scope;
      tree saved_qualifying_scope;
      tree unqualified_id;
      /* Scratch location so that *TEMPLATE_P can be written even when
	 the caller passed a null TEMPLATE_P.  */
      bool is_template;

      /* See if the next token is the `template' keyword.  */
      if (!template_p)
	template_p = &is_template;
      *template_p = cp_parser_optional_template_keyword (parser);
      /* Name lookup we do during the processing of the
	 unqualified-id might obliterate SCOPE.
      */
      saved_scope = parser->scope;
      saved_object_scope = parser->object_scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* Process the final unqualified-id.  */
      unqualified_id = cp_parser_unqualified_id (parser, *template_p,
						 check_dependency_p,
						 declarator_p,
						 /*optional_p=*/false);
      /* Restore the SAVED_SCOPE for our caller.  */
      parser->scope = saved_scope;
      parser->object_scope = saved_object_scope;
      parser->qualifying_scope = saved_qualifying_scope;

      return unqualified_id;
    }
  /* Otherwise, if we are in global scope, then we are looking at one
     of the other qualified-id productions.  */
  else if (global_scope_p)
    {
      cp_token *token;
      tree id;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If it's an identifier, and the next token is not a "<", then
	 we can avoid the template-id case.  This is an optimization
	 for this common case.  */
      if (token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2))
	return cp_parser_identifier (parser);

      cp_parser_parse_tentatively (parser);
      /* Try a template-id.  */
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  declarator_p);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return id;

      /* Peek at the next token.  (Changes in the token buffer may
	 have invalidated the pointer obtained above.)  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_NAME:
	  return cp_parser_identifier (parser);

	case CPP_KEYWORD:
	  if (token->keyword == RID_OPERATOR)
	    return cp_parser_operator_function_id (parser);
	  /* Fall through.  */

	default:
	  cp_parser_error (parser, "expected id-expression");
	  return error_mark_node;
	}
    }
  else
    return cp_parser_unqualified_id (parser, template_keyword_p,
				     /*check_dependency_p=*/true,
				     declarator_p,
				     optional_p);
}

/* Parse an unqualified-id.
   unqualified-id:
     identifier
     operator-function-id
     conversion-function-id
     ~ class-name
     template-id

   If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template'
   keyword, in a construct like `A::template ...'.

   Returns a representation of unqualified-id.  For the `identifier'
   production, an IDENTIFIER_NODE is returned.  For the `~ class-name'
   production a BIT_NOT_EXPR is returned; the operand of the
   BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name.  For the
   other productions, see the documentation accompanying the
   corresponding parsing functions.

   If CHECK_DEPENDENCY_P is false, names are looked up in
   uninstantiated templates.

   If DECLARATOR_P is true, the unqualified-id is appearing as part of
   a declarator, rather than as part of an expression.  */

static tree
cp_parser_unqualified_id (cp_parser* parser,
			  bool template_keyword_p,
			  bool check_dependency_p,
			  bool declarator_p,
			  bool optional_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_NAME:
      {
	tree id;

	/* We don't know yet whether or not this will be a
	   template-id.  */
	cp_parser_parse_tentatively (parser);
	/* Try a template-id.  */
	id = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    declarator_p);
	/* If it worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  return id;
	/* Otherwise, it's an ordinary identifier.  */
	return cp_parser_identifier (parser);
      }

    case CPP_TEMPLATE_ID:
      return cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    declarator_p);

    case CPP_COMPL:
      {
	tree type_decl;
	tree qualifying_scope;
	tree object_scope;
	tree scope;
	/* Set once one of the ordered class-name lookups below has
	   succeeded.  */
	bool done;

	/* Consume the `~' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the class-name.  The standard, as written, seems to
	   say that:

	     template <typename T> struct S { ~S (); };
	     template <typename T> S<T>::~S() {}

	   is invalid, since `~' must be followed by a class-name, but
	   `S<T>' is dependent, and so not known to be a class.
	   That's not right; we need to look in uninstantiated
	   templates.  A further complication arises from:

	     template <typename T> void f(T t) {
	       t.T::~T();
	     }

	   Here, it is not possible to look up `T' in the scope of `T'
	   itself.  We must look in both the current scope, and the
	   scope of the containing complete expression.

	   Yet another issue is:

	     struct S {
	       int S;
	       ~S();
	     };

	     S::~S() {}

	   The standard does not seem to say that the `S' in `~S'
	   should refer to the type `S' and not the data member
	   `S::S'.  */

	/* DR 244 says that we look up the name after the "~" in the
	   same scope as we looked up the qualifying name.  That idea
	   isn't fully worked out; it's more complicated than that.  */
	scope = parser->scope;
	object_scope = parser->object_scope;
	qualifying_scope = parser->qualifying_scope;

	/* Check for invalid scopes.  */
	if (scope == error_mark_node)
	  {
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	if (scope && TREE_CODE (scope) == NAMESPACE_DECL)
	  {
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error_at (token->location,
			"scope %qT before %<~%> is not a class-name",
			scope);
	    cp_parser_simulate_error (parser);
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	gcc_assert (!scope || TYPE_P (scope));

	/* If the name is of the form "X::~X" it's OK even if X is a
	   typedef.  */
	token = cp_lexer_peek_token (parser->lexer);
	if (scope
	    && token->type == CPP_NAME
	    && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		!= CPP_LESS)
	    && (token->u.value == TYPE_IDENTIFIER (scope)
		|| (CLASS_TYPE_P (scope)
		    && constructor_name_p (token->u.value, scope))))
	  {
	    cp_lexer_consume_token (parser->lexer);
	    return build_nt (BIT_NOT_EXPR, scope);
	  }

	/* If there was an explicit qualification (S::~T), first look
	   in the scope given by the qualification (i.e., S).

	   Note: in the calls to cp_parser_class_name below we pass
	   typename_type so that lookup finds the injected-class-name
	   rather than the constructor.  */
	done = false;
	type_decl = NULL_TREE;
	if (scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    type_decl = cp_parser_class_name (parser,
					      /*typename_keyword_p=*/false,
					      /*template_keyword_p=*/false,
					      typename_type,
					      /*check_dependency=*/false,
					      /*class_head_p=*/false,
					      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "N::S::~S", look in "N" as well.  */
	if (!done && scope && qualifying_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = qualifying_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "p->S::~T", look in the scope given by "*p" as well.  */
	else if (!done && object_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = object_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* Look in the surrounding context.  */
	if (!done)
	  {
	    parser->scope = NULL_TREE;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    if (processing_template_decl)
	      cp_parser_parse_tentatively (parser);
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (processing_template_decl
		&& ! cp_parser_parse_definitely (parser))
	      {
		/* We couldn't find a type with this name, so just accept
		   it and check for a match at instantiation time.  */
		type_decl = cp_parser_identifier (parser);
		if (type_decl != error_mark_node)
		  type_decl = build_nt (BIT_NOT_EXPR, type_decl);
		return type_decl;
	      }
	  }
	/* If an error occurred, assume that the name of the
	   destructor is the same as the name of the qualifying
	   class.  That allows us to keep parsing after running
	   into ill-formed destructor names.  */
	if (type_decl == error_mark_node && scope)
	  return build_nt (BIT_NOT_EXPR, scope);
	else if (type_decl == error_mark_node)
	  return error_mark_node;

	/* Check that destructor name and scope match.  */
	if (declarator_p && scope && !check_dtor_name (scope, type_decl))
	  {
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error_at (token->location,
			"declaration of %<~%T%> as member of %qT",
			type_decl, scope);
	    cp_parser_simulate_error (parser);
	    return error_mark_node;
	  }

	/* [class.dtor]

	   A typedef-name that names a class shall not be used as the
	   identifier in the declarator for a destructor declaration.  */
	if (declarator_p
	    && !DECL_IMPLICIT_TYPEDEF_P (type_decl)
	    && !DECL_SELF_REFERENCE_P (type_decl)
	    && !cp_parser_uncommitted_to_tentative_parse_p (parser))
	  error_at (token->location,
		    "typedef-name %qD used as destructor declarator",
		    type_decl);

	return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl));
      }

    case CPP_KEYWORD:
      if (token->keyword == RID_OPERATOR)
	{
	  tree id;

	  /* This could be a template-id, so we try that first.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try a template-id.  */
	  id = cp_parser_template_id (parser, template_keyword_p,
				      /*check_dependency_p=*/true,
				      declarator_p);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return id;
	  /* We still don't know whether we're looking at an
	     operator-function-id or a conversion-function-id.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try an operator-function-id.
	  */
	  id = cp_parser_operator_function_id (parser);
	  /* If that didn't work, try a conversion-function-id.  */
	  if (!cp_parser_parse_definitely (parser))
	    id = cp_parser_conversion_function_id (parser);
	  else if (UDLIT_OPER_P (id))
	    {
	      /* 17.6.3.3.5  */
	      const char *name = UDLIT_OP_SUFFIX (id);
	      if (name[0] != '_' && !in_system_header)
		warning (0, "literal operator suffixes not preceded by %<_%>"
			    " are reserved for future standardization");
	    }

	  return id;
	}
      /* Fall through.  */

    default:
      if (optional_p)
	return NULL_TREE;
      cp_parser_error (parser, "expected unqualified-id");
      return error_mark_node;
    }
}

/* Parse an (optional) nested-name-specifier.

   nested-name-specifier: [C++98]
     class-or-namespace-name :: nested-name-specifier [opt]
     class-or-namespace-name :: template nested-name-specifier [opt]

   nested-name-specifier: [C++0x]
     type-name ::
     namespace-name ::
     nested-name-specifier identifier ::
     nested-name-specifier template [opt] simple-template-id ::

   PARSER->SCOPE should be set appropriately before this function is
   called.  TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in
   effect.  TYPE_P is TRUE if we non-type bindings should be ignored
   in name lookups.

   Sets PARSER->SCOPE to the class (TYPE) or namespace
   (NAMESPACE_DECL) specified by the nested-name-specifier, or leaves
   it unchanged if there is no nested-name-specifier.  Returns the new
   scope iff there is a nested-name-specifier, or NULL_TREE otherwise.

   If IS_DECLARATION is TRUE, the nested-name-specifier is known to be
   part of a declaration and/or decl-specifier.  */

static tree
cp_parser_nested_name_specifier_opt (cp_parser *parser,
				     bool typename_keyword_p,
				     bool check_dependency_p,
				     bool type_p,
				     bool is_declaration)
{
  bool success = false;
  cp_token_position start = 0;
  cp_token *token;

  /* Remember where the nested-name-specifier starts.
  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      start = cp_lexer_token_position (parser->lexer, false);
      /* Defer access checks; they are retrieved below when the parsed
	 tokens are replaced by a CPP_NESTED_NAME_SPECIFIER, and the
	 deferral is popped at the end of this function.  */
      push_deferring_access_checks (dk_deferred);
    }

  while (true)
    {
      tree new_scope;
      tree old_scope;
      tree saved_qualifying_scope;
      bool template_keyword_p;

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process
	 the already parsed nested-name-specifier.  */
      if (token->type == CPP_NESTED_NAME_SPECIFIER)
	{
	  /* Grab the nested-name-specifier and continue the loop.  */
	  cp_parser_pre_parsed_nested_name_specifier (parser);
	  /* If we originally encountered this nested-name-specifier
	     with IS_DECLARATION set to false, we will not have
	     resolved TYPENAME_TYPEs, so we must do so here.  */
	  if (is_declaration
	      && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	    {
	      new_scope = resolve_typename_type (parser->scope,
						 /*only_current_p=*/false);
	      if (TREE_CODE (new_scope) != TYPENAME_TYPE)
		parser->scope = new_scope;
	    }
	  success = true;
	  continue;
	}

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  On the second and subsequent times
	 through the loop, we look for the `template' keyword.  */
      if (success && token->keyword == RID_TEMPLATE)
	;
      /* A template-id can start a nested-name-specifier.  */
      else if (token->type == CPP_TEMPLATE_ID)
	;
      /* DR 743: decltype can be used in a nested-name-specifier.  */
      else if (token_is_decltype (token))
	;
      else
	{
	  /* If the next token is not an identifier, then it is
	     definitely not a type-name or namespace-name.  */
	  if (token->type != CPP_NAME)
	    break;
	  /* If the following token is neither a `<' (to begin a
	     template-id), nor a `::', then we are not looking at a
	     nested-name-specifier.
	  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);

	  /* Fix up a `:' that was probably meant to be `::'.  */
	  if (token->type == CPP_COLON
	      && parser->colon_corrects_to_scope_p
	      && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_NAME)
	    {
	      error_at (token->location,
			"found %<:%> in nested-name-specifier, expected %<::%>");
	      token->type = CPP_SCOPE;
	    }

	  if (token->type != CPP_SCOPE
	      && !cp_parser_nth_token_starts_template_argument_list_p
		  (parser, 2))
	    break;
	}

      /* The nested-name-specifier is optional, so we parse
	 tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Look for the optional `template' keyword, if this isn't the
	 first time through the loop.  */
      if (success)
	template_keyword_p = cp_parser_optional_template_keyword (parser);
      else
	template_keyword_p = false;

      /* Save the old scope since the name lookup we are about to do
	 might destroy it.  */
      old_scope = parser->scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* In a declarator-id like "X<T>::I::Y<T>" we must be able to
	 look up names in "X<T>::I" in order to determine that "Y" is
	 a template.  So, if we have a typename at this point, we make
	 an effort to look through it.  */
      if (is_declaration
	  && !typename_keyword_p
	  && parser->scope
	  && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	parser->scope = resolve_typename_type (parser->scope,
					       /*only_current_p=*/false);
      /* Parse the qualifying entity.  */
      new_scope
	= cp_parser_qualifying_entity (parser,
				       typename_keyword_p,
				       template_keyword_p,
				       check_dependency_p,
				       type_p,
				       is_declaration);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);

      /* If we found what we wanted, we keep going; otherwise, we're
	 done.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  bool error_p = false;

	  /* Restore the OLD_SCOPE since it was valid before the
	     failed attempt at finding the last
	     class-or-namespace-name.
	  */
	  parser->scope = old_scope;
	  parser->qualifying_scope = saved_qualifying_scope;

	  /* If the next token is a decltype, and the one after that is a
	     `::', then the decltype has failed to resolve to a class or
	     enumeration type.  Give this error even when parsing
	     tentatively since it can't possibly be valid--and we're going
	     to replace it with a CPP_NESTED_NAME_SPECIFIER below, so we
	     won't get another chance.*/
	  if (cp_lexer_next_token_is (parser->lexer, CPP_DECLTYPE)
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_SCOPE))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      error_at (token->location, "decltype evaluates to %qT, "
			"which is not a class or enumeration type",
			token->u.value);
	      parser->scope = error_mark_node;
	      error_p = true;
	      /* As below.  */
	      success = true;
	      cp_lexer_consume_token (parser->lexer);
	    }

	  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
	    break;
	  /* If the next token is an identifier, and the one after
	     that is a `::', then any valid interpretation would have
	     found a class-or-namespace-name.
	  */
	  while (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
		 && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		     == CPP_SCOPE)
		 && (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		     != CPP_COMPL))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      if (!error_p)
		{
		  if (!token->ambiguous_p)
		    {
		      tree decl;
		      tree ambiguous_decls;

		      decl = cp_parser_lookup_name (parser, token->u.value,
						    none_type,
						    /*is_template=*/false,
						    /*is_namespace=*/false,
						    /*check_dependency=*/true,
						    &ambiguous_decls,
						    token->location);
		      if (TREE_CODE (decl) == TEMPLATE_DECL)
			error_at (token->location,
				  "%qD used without template parameters",
				  decl);
		      else if (ambiguous_decls)
			{
			  error_at (token->location,
				    "reference to %qD is ambiguous",
				    token->u.value);
			  print_candidates (ambiguous_decls);
			  decl = error_mark_node;
			}
		      else
			{
			  if (cxx_dialect != cxx98)
			    cp_parser_name_lookup_error
			      (parser, token->u.value, decl, NLE_NOT_CXX98,
			       token->location);
			  else
			    cp_parser_name_lookup_error
			      (parser, token->u.value, decl, NLE_CXX98,
			       token->location);
			}
		    }
		  parser->scope = error_mark_node;
		  error_p = true;
		  /* Treat this as a successful nested-name-specifier
		     due to:

		     [basic.lookup.qual]

		     If the name found is not a class-name (clause
		     _class_) or namespace-name (_namespace.def_), the
		     program is ill-formed.  */
		  success = true;
		}
	      cp_lexer_consume_token (parser->lexer);
	    }
	  break;
	}
      /* We've found one valid nested-name-specifier.  */
      success = true;
      /* Name lookup always gives us a DECL.  */
      if (TREE_CODE (new_scope) == TYPE_DECL)
	new_scope = TREE_TYPE (new_scope);
      /* Uses of "template" must be followed by actual templates.  */
      if (template_keyword_p
	  && !(CLASS_TYPE_P (new_scope)
	       && ((CLASSTYPE_USE_TEMPLATE (new_scope)
		    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope)))
		   || CLASSTYPE_IS_TEMPLATE (new_scope)))
	  && !(TREE_CODE (new_scope) == TYPENAME_TYPE
	       && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope))
		   == TEMPLATE_ID_EXPR)))
	permerror (input_location, TYPE_P (new_scope)
		   ? G_("%qT is not a template")
		   : G_("%qD is not a template"),
		   new_scope);
      /* If it is a class scope, try to complete it; we are about to
	 be looking up names inside the class.  */
      if (TYPE_P (new_scope)
	  /* Since checking types for dependency can be expensive,
	     avoid doing it if the type is already complete.  */
	  && !COMPLETE_TYPE_P (new_scope)
	  /* Do not try to complete dependent types.  */
	  && !dependent_type_p (new_scope))
	{
	  new_scope = complete_type (new_scope);
	  /* If it is a typedef to current class, use the current
	     class instead, as the typedef won't have any names inside
	     it yet.  */
	  if (!COMPLETE_TYPE_P (new_scope)
	      && currently_open_class (new_scope))
	    new_scope = TYPE_MAIN_VARIANT (new_scope);
	}
      /* Make sure we look in the right scope the next time through
	 the loop.  */
      parser->scope = new_scope;
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER
     token.  That way, should we re-parse the token stream, we will
     not have to repeat the effort required to do the parse, nor will
     we issue duplicate error messages.  */
  if (success && start)
    {
      cp_token *token;

      token = cp_lexer_token_at (parser->lexer, start);

      /* Reset the contents of the START token.  */
      token->type = CPP_NESTED_NAME_SPECIFIER;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = ggc_alloc_cleared_tree_check ();
      token->u.tree_check_value->value = parser->scope;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->u.tree_check_value->qualifying_scope =
	parser->qualifying_scope;
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start);
    }

  if (start)
    pop_to_parent_deferring_access_checks ();

  return success ? parser->scope : NULL_TREE;
}

/* Parse a nested-name-specifier.  See cp_parser_nested_name_specifier_opt
   for details.
This function behaves identically, except that it will an issue an error if no nested-name-specifier is present. */ static tree cp_parser_nested_name_specifier (cp_parser *parser, bool typename_keyword_p, bool check_dependency_p, bool type_p, bool is_declaration) { tree scope; /* Look for the nested-name-specifier. */ scope = cp_parser_nested_name_specifier_opt (parser, typename_keyword_p, check_dependency_p, type_p, is_declaration); /* If it was not present, issue an error message. */ if (!scope) { cp_parser_error (parser, "expected nested-name-specifier"); parser->scope = NULL_TREE; } return scope; } /* Parse the qualifying entity in a nested-name-specifier. For C++98, this is either a class-name or a namespace-name (which corresponds to the class-or-namespace-name production in the grammar). For C++0x, it can also be a type-name that refers to an enumeration type or a simple-template-id. TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect. TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect. CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up. TYPE_P is TRUE iff the next name should be taken as a class-name, even the same name is declared to be another entity in the same scope. Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL) specified by the class-or-namespace-name. If neither is found the ERROR_MARK_NODE is returned. */ static tree cp_parser_qualifying_entity (cp_parser *parser, bool typename_keyword_p, bool template_keyword_p, bool check_dependency_p, bool type_p, bool is_declaration) { tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; tree scope; bool only_class_p; bool successful_parse_p; /* DR 743: decltype can appear in a nested-name-specifier. 
*/ if (cp_lexer_next_token_is_decltype (parser->lexer)) { scope = cp_parser_decltype (parser); if (TREE_CODE (scope) != ENUMERAL_TYPE && !MAYBE_CLASS_TYPE_P (scope)) { cp_parser_simulate_error (parser); return error_mark_node; } if (TYPE_NAME (scope)) scope = TYPE_NAME (scope); return scope; } /* Before we try to parse the class-name, we must save away the current PARSER->SCOPE since cp_parser_class_name will destroy it. */ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* Try for a class-name first. If the SAVED_SCOPE is a type, then there is no need to look for a namespace-name. */ only_class_p = template_keyword_p || (saved_scope && TYPE_P (saved_scope) && cxx_dialect == cxx98); if (!only_class_p) cp_parser_parse_tentatively (parser); scope = cp_parser_class_name (parser, typename_keyword_p, template_keyword_p, type_p ? class_type : none_type, check_dependency_p, /*class_head_p=*/false, is_declaration); successful_parse_p = only_class_p || cp_parser_parse_definitely (parser); /* If that didn't work and we're in C++0x mode, try for a type-name. */ if (!only_class_p && cxx_dialect != cxx98 && !successful_parse_p) { /* Restore the saved scope. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; /* Parse tentatively. */ cp_parser_parse_tentatively (parser); /* Parse a type-name */ scope = cp_parser_type_name (parser); /* "If the name found does not designate a namespace or a class, enumeration, or dependent type, the program is ill-formed." We cover classes and dependent types above and namespaces below, so this code is only looking for enums. */ if (!scope || TREE_CODE (scope) != TYPE_DECL || TREE_CODE (TREE_TYPE (scope)) != ENUMERAL_TYPE) cp_parser_simulate_error (parser); successful_parse_p = cp_parser_parse_definitely (parser); } /* If that didn't work, try for a namespace-name. 
*/ if (!only_class_p && !successful_parse_p) { /* Restore the saved scope. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; /* If we are not looking at an identifier followed by the scope resolution operator, then this is not part of a nested-name-specifier. (Note that this function is only used to parse the components of a nested-name-specifier.) */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME) || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE) return error_mark_node; scope = cp_parser_namespace_name (parser); } return scope; } /* Parse a postfix-expression. postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( expression-list [opt] ) simple-type-specifier ( expression-list [opt] ) typename :: [opt] nested-name-specifier identifier ( expression-list [opt] ) typename :: [opt] nested-name-specifier template [opt] template-id ( expression-list [opt] ) postfix-expression . template [opt] id-expression postfix-expression -> template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> pseudo-destructor-name postfix-expression ++ postfix-expression -- dynamic_cast < type-id > ( expression ) static_cast < type-id > ( expression ) reinterpret_cast < type-id > ( expression ) const_cast < type-id > ( expression ) typeid ( expression ) typeid ( type-id ) GNU Extension: postfix-expression: ( type-id ) { initializer-list , [opt] } This extension is a GNU version of the C99 compound-literal construct. (The C99 grammar uses `type-name' instead of `type-id', but they are essentially the same concept.) If ADDRESS_P is true, the postfix expression is the operand of the `&' operator. CAST_P is true if this expression is the target of a cast. If MEMBER_ACCESS_ONLY_P, we only allow postfix expressions that are class member access expressions [expr.ref]. Returns a representation of the expression. 
*/ static tree cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p, bool member_access_only_p, cp_id_kind * pidk_return) { cp_token *token; enum rid keyword; cp_id_kind idk = CP_ID_KIND_NONE; tree postfix_expression = NULL_TREE; bool is_member_access = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Some of the productions are determined by keywords. */ keyword = token->keyword; switch (keyword) { case RID_DYNCAST: case RID_STATCAST: case RID_REINTCAST: case RID_CONSTCAST: { tree type; tree expression; const char *saved_message; /* All of these can be handled in the same way from the point of view of parsing. Begin by consuming the token identifying the cast. */ cp_lexer_consume_token (parser->lexer); /* New types cannot be defined in the cast. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in casts"); /* Look for the opening `<'. */ cp_parser_require (parser, CPP_LESS, RT_LESS); /* Parse the type to which we are casting. */ type = cp_parser_type_id (parser); /* Look for the closing `>'. */ cp_parser_require (parser, CPP_GREATER, RT_GREATER); /* Restore the old message. */ parser->type_definition_forbidden_message = saved_message; /* And the expression which is being cast. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); expression = cp_parser_expression (parser, /*cast_p=*/true, & idk); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Only type conversions to integral or enumeration types can be used in constant-expressions. 
*/ if (!cast_valid_in_integral_constant_expression_p (type) && cp_parser_non_integral_constant_expression (parser, NIC_CAST)) return error_mark_node; switch (keyword) { case RID_DYNCAST: postfix_expression = build_dynamic_cast (type, expression, tf_warning_or_error); break; case RID_STATCAST: postfix_expression = build_static_cast (type, expression, tf_warning_or_error); break; case RID_REINTCAST: postfix_expression = build_reinterpret_cast (type, expression, tf_warning_or_error); break; case RID_CONSTCAST: postfix_expression = build_const_cast (type, expression, tf_warning_or_error); break; default: gcc_unreachable (); } } break; case RID_TYPEID: { tree type; const char *saved_message; bool saved_in_type_id_in_expr_p; /* Consume the `typeid' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `(' token. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Types cannot be defined in a `typeid' expression. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in a %<typeid%> expression"); /* We can't be sure yet whether we're looking at a type-id or an expression. */ cp_parser_parse_tentatively (parser); /* Try a type-id first. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the `)' token. Otherwise, we can't be sure that we're not looking at an expression: consider `typeid (int (3))', for example. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* If all went well, simply lookup the type-id. */ if (cp_parser_parse_definitely (parser)) postfix_expression = get_typeid (type); /* Otherwise, fall back to the expression variant. */ else { tree expression; /* Look for an expression. */ expression = cp_parser_expression (parser, /*cast_p=*/false, & idk); /* Compute its typeid. 
*/ postfix_expression = build_typeid (expression); /* Look for the `)' token. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; /* `typeid' may not appear in an integral constant expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_TYPEID)) return error_mark_node; } break; case RID_TYPENAME: { tree type; /* The syntax permitted here is the same permitted for an elaborated-type-specifier. */ type = cp_parser_elaborated_type_specifier (parser, /*is_friend=*/false, /*is_declaration=*/false); postfix_expression = cp_parser_functional_cast (parser, type); } break; default: { tree type; /* If the next thing is a simple-type-specifier, we may be looking at a functional cast. We could also be looking at an id-expression. So, we try the functional cast, and if that doesn't work we fall back to the primary-expression. */ cp_parser_parse_tentatively (parser); /* Look for the simple-type-specifier. */ type = cp_parser_simple_type_specifier (parser, /*decl_specs=*/NULL, CP_PARSER_FLAGS_NONE); /* Parse the cast itself. */ if (!cp_parser_error_occurred (parser)) postfix_expression = cp_parser_functional_cast (parser, type); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) break; /* If the functional-cast didn't work out, try a compound-literal. */ if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { VEC(constructor_elt,gc) *initializer_list = NULL; bool saved_in_type_id_in_expr_p; cp_parser_parse_tentatively (parser); /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Parse the type. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the `)'. 
*/ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Look for the `{'. */ cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE); /* If things aren't going well, there's no need to keep going. */ if (!cp_parser_error_occurred (parser)) { bool non_constant_p; /* Parse the initializer-list. */ initializer_list = cp_parser_initializer_list (parser, &non_constant_p); /* Allow a trailing `,'. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); /* Look for the final `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); } /* If that worked, we're definitely looking at a compound-literal expression. */ if (cp_parser_parse_definitely (parser)) { /* Warn the user that a compound literal is not allowed in standard C++. */ pedwarn (input_location, OPT_pedantic, "ISO C++ forbids compound-literals"); /* For simplicity, we disallow compound literals in constant-expressions. We could allow compound literals of integer type, whose initializer was a constant, in constant expressions. Permitting that usage, as a further extension, would not change the meaning of any currently accepted programs. (Of course, as compound literals are not part of ISO C++, the standard has nothing to say.) */ if (cp_parser_non_integral_constant_expression (parser, NIC_NCC)) { postfix_expression = error_mark_node; break; } /* Form the representation of the compound-literal. */ postfix_expression = (finish_compound_literal (type, build_constructor (init_list_type_node, initializer_list), tf_warning_or_error)); break; } } /* It must be a primary-expression. */ postfix_expression = cp_parser_primary_expression (parser, address_p, cast_p, /*template_arg_p=*/false, &idk); } break; } /* Keep looping until the postfix-expression is complete. 
*/ while (true) { if (idk == CP_ID_KIND_UNQUALIFIED && TREE_CODE (postfix_expression) == IDENTIFIER_NODE && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)) /* It is not a Koenig lookup function call. */ postfix_expression = unqualified_name_lookup_error (postfix_expression); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_OPEN_SQUARE: postfix_expression = cp_parser_postfix_open_square_expression (parser, postfix_expression, false); idk = CP_ID_KIND_NONE; is_member_access = false; break; case CPP_OPEN_PAREN: /* postfix-expression ( expression-list [opt] ) */ { bool koenig_p; bool is_builtin_constant_p; bool saved_integral_constant_expression_p = false; bool saved_non_integral_constant_expression_p = false; VEC(tree,gc) *args; is_member_access = false; is_builtin_constant_p = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression); if (is_builtin_constant_p) { /* The whole point of __builtin_constant_p is to allow non-constant expressions to appear as arguments. */ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; } args = (cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL)); if (is_builtin_constant_p) { parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; } if (args == NULL) { postfix_expression = error_mark_node; break; } /* Function calls are not permitted in constant-expressions. */ if (! 
builtin_valid_in_constant_expr_p (postfix_expression) && cp_parser_non_integral_constant_expression (parser, NIC_FUNC_CALL)) { postfix_expression = error_mark_node; release_tree_vector (args); break; } koenig_p = false; if (idk == CP_ID_KIND_UNQUALIFIED || idk == CP_ID_KIND_TEMPLATE_ID) { if (TREE_CODE (postfix_expression) == IDENTIFIER_NODE) { if (!VEC_empty (tree, args)) { koenig_p = true; if (!any_type_dependent_arguments_p (args)) postfix_expression = perform_koenig_lookup (postfix_expression, args, /*include_std=*/false, tf_warning_or_error); } else postfix_expression = unqualified_fn_lookup_error (postfix_expression); } /* We do not perform argument-dependent lookup if normal lookup finds a non-function, in accordance with the expected resolution of DR 218. */ else if (!VEC_empty (tree, args) && is_overloaded_fn (postfix_expression)) { tree fn = get_first_fn (postfix_expression); fn = STRIP_TEMPLATE (fn); /* Do not do argument dependent lookup if regular lookup finds a member function or a block-scope function declaration. [basic.lookup.argdep]/3 */ if (!DECL_FUNCTION_MEMBER_P (fn) && !DECL_LOCAL_FUNCTION_P (fn)) { koenig_p = true; if (!any_type_dependent_arguments_p (args)) postfix_expression = perform_koenig_lookup (postfix_expression, args, /*include_std=*/false, tf_warning_or_error); } } } if (TREE_CODE (postfix_expression) == COMPONENT_REF) { tree instance = TREE_OPERAND (postfix_expression, 0); tree fn = TREE_OPERAND (postfix_expression, 1); if (processing_template_decl && (type_dependent_expression_p (instance) || (!BASELINK_P (fn) && TREE_CODE (fn) != FIELD_DECL) || type_dependent_expression_p (fn) || any_type_dependent_arguments_p (args))) { postfix_expression = build_nt_call_vec (postfix_expression, args); release_tree_vector (args); break; } if (BASELINK_P (fn)) { postfix_expression = (build_new_method_call (instance, fn, &args, NULL_TREE, (idk == CP_ID_KIND_QUALIFIED ? 
LOOKUP_NORMAL|LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, tf_warning_or_error)); } else postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/false, /*koenig_p=*/false, tf_warning_or_error); } else if (TREE_CODE (postfix_expression) == OFFSET_REF || TREE_CODE (postfix_expression) == MEMBER_REF || TREE_CODE (postfix_expression) == DOTSTAR_EXPR) postfix_expression = (build_offset_ref_call_from_tree (postfix_expression, &args)); else if (idk == CP_ID_KIND_QUALIFIED) /* A call to a static class member, or a namespace-scope function. */ postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/true, koenig_p, tf_warning_or_error); else /* All other function calls. */ postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/false, koenig_p, tf_warning_or_error); /* The POSTFIX_EXPRESSION is certainly no longer an id. */ idk = CP_ID_KIND_NONE; release_tree_vector (args); } break; case CPP_DOT: case CPP_DEREF: /* postfix-expression . template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> template [opt] id-expression postfix-expression -> pseudo-destructor-name */ /* Consume the `.' or `->' operator. */ cp_lexer_consume_token (parser->lexer); postfix_expression = cp_parser_postfix_dot_deref_expression (parser, token->type, postfix_expression, false, &idk, token->location); is_member_access = true; break; case CPP_PLUS_PLUS: /* postfix-expression ++ */ /* Consume the `++' token. */ cp_lexer_consume_token (parser->lexer); /* Generate a representation for the complete expression. */ postfix_expression = finish_increment_expr (postfix_expression, POSTINCREMENT_EXPR); /* Increments may not appear in constant-expressions. 
*/ if (cp_parser_non_integral_constant_expression (parser, NIC_INC)) postfix_expression = error_mark_node; idk = CP_ID_KIND_NONE; is_member_access = false; break; case CPP_MINUS_MINUS: /* postfix-expression -- */ /* Consume the `--' token. */ cp_lexer_consume_token (parser->lexer); /* Generate a representation for the complete expression. */ postfix_expression = finish_increment_expr (postfix_expression, POSTDECREMENT_EXPR); /* Decrements may not appear in constant-expressions. */ if (cp_parser_non_integral_constant_expression (parser, NIC_DEC)) postfix_expression = error_mark_node; idk = CP_ID_KIND_NONE; is_member_access = false; break; default: if (pidk_return != NULL) * pidk_return = idk; if (member_access_only_p) return is_member_access? postfix_expression : error_mark_node; else return postfix_expression; } } /* We should never get here. */ gcc_unreachable (); return error_mark_node; } /* A subroutine of cp_parser_postfix_expression that also gets hijacked by cp_parser_builtin_offsetof. We're looking for postfix-expression [ expression ] postfix-expression [ braced-init-list ] (C++11) FOR_OFFSETOF is set if we're being called in that context, which changes how we deal with integer constant expressions. */ static tree cp_parser_postfix_open_square_expression (cp_parser *parser, tree postfix_expression, bool for_offsetof) { tree index; /* Consume the `[' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the index expression. */ /* ??? For offsetof, there is a question of what to allow here. If offsetof is not being used in an integral constant expression context, then we *could* get the right answer by computing the value at runtime. If we are in an integral constant expression context, then we might could accept any constant expression; hard to say without analysis. Rather than open the barn door too wide right away, allow only integer constant expressions here. 
*/ if (for_offsetof) index = cp_parser_constant_expression (parser, false, NULL); else { if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool expr_nonconst_p; maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); index = cp_parser_braced_list (parser, &expr_nonconst_p); } else index = cp_parser_expression (parser, /*cast_p=*/false, NULL); } /* Look for the closing `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); /* Build the ARRAY_REF. */ postfix_expression = grok_array_decl (postfix_expression, index); /* When not doing offsetof, array references are not permitted in constant-expressions. */ if (!for_offsetof && (cp_parser_non_integral_constant_expression (parser, NIC_ARRAY_REF))) postfix_expression = error_mark_node; return postfix_expression; } /* A subroutine of cp_parser_postfix_expression that also gets hijacked by cp_parser_builtin_offsetof. We're looking for postfix-expression . template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> template [opt] id-expression postfix-expression -> pseudo-destructor-name FOR_OFFSETOF is set if we're being called in that context. That sorta limits what of the above we'll actually accept, but nevermind. TOKEN_TYPE is the "." or "->" token, which will already have been removed from the stream. */ static tree cp_parser_postfix_dot_deref_expression (cp_parser *parser, enum cpp_ttype token_type, tree postfix_expression, bool for_offsetof, cp_id_kind *idk, location_t location) { tree name; bool dependent_p; bool pseudo_destructor_p; tree scope = NULL_TREE; /* If this is a `->' operator, dereference the pointer. */ if (token_type == CPP_DEREF) postfix_expression = build_x_arrow (postfix_expression); /* Check to see whether or not the expression is type-dependent. */ dependent_p = type_dependent_expression_p (postfix_expression); /* The identifier following the `->' or `.' is not qualified. 
*/ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; *idk = CP_ID_KIND_NONE; /* Enter the scope corresponding to the type of the object given by the POSTFIX_EXPRESSION. */ if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE) { scope = TREE_TYPE (postfix_expression); /* According to the standard, no expression should ever have reference type. Unfortunately, we do not currently match the standard in this respect in that our internal representation of an expression may have reference type even when the standard says it does not. Therefore, we have to manually obtain the underlying type here. */ scope = non_reference (scope); /* The type of the POSTFIX_EXPRESSION must be complete. */ if (scope == unknown_type_node) { error_at (location, "%qE does not have class type", postfix_expression); scope = NULL_TREE; } /* Unlike the object expression in other contexts, *this is not required to be of complete type for purposes of class member access (5.2.5) outside the member function body. */ else if (scope != current_class_ref && !(processing_template_decl && scope == current_class_type)) scope = complete_type_or_else (scope, NULL_TREE); /* Let the name lookup machinery know that we are processing a class member access expression. */ parser->context->object_type = scope; /* If something went wrong, we want to be able to discern that case, as opposed to the case where there was no SCOPE due to the type of expression being dependent. */ if (!scope) scope = error_mark_node; /* If the SCOPE was erroneous, make the various semantic analysis functions exit quickly -- and without issuing additional error messages. */ if (scope == error_mark_node) postfix_expression = error_mark_node; } /* Assume this expression is not a pseudo-destructor access. */ pseudo_destructor_p = false; /* If the SCOPE is a scalar type, then, if this is a valid program, we must be looking at a pseudo-destructor-name. 
If POSTFIX_EXPRESSION is type dependent, it can be pseudo-destructor-name or something else. Try to parse it as pseudo-destructor-name first. */ if ((scope && SCALAR_TYPE_P (scope)) || dependent_p) { tree s; tree type; cp_parser_parse_tentatively (parser); /* Parse the pseudo-destructor-name. */ s = NULL_TREE; cp_parser_pseudo_destructor_name (parser, &s, &type); if (dependent_p && (cp_parser_error_occurred (parser) || TREE_CODE (type) != TYPE_DECL || !SCALAR_TYPE_P (TREE_TYPE (type)))) cp_parser_abort_tentative_parse (parser); else if (cp_parser_parse_definitely (parser)) { pseudo_destructor_p = true; postfix_expression = finish_pseudo_destructor_expr (postfix_expression, s, TREE_TYPE (type)); } } if (!pseudo_destructor_p) { /* If the SCOPE is not a scalar type, we are looking at an ordinary class member access expression, rather than a pseudo-destructor-name. */ bool template_p; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Parse the id-expression. */ name = (cp_parser_id_expression (parser, cp_parser_optional_template_keyword (parser), /*check_dependency_p=*/true, &template_p, /*declarator_p=*/false, /*optional_p=*/false)); /* In general, build a SCOPE_REF if the member name is qualified. However, if the name was not dependent and has already been resolved; there is no need to build the SCOPE_REF. For example; struct X { void f(); }; template <typename T> void f(T* t) { t->X::f(); } Even though "t" is dependent, "X::f" is not and has been resolved to a BASELINK; there is no need to include scope information. */ /* But we do need to remember that there was an explicit scope for virtual function calls. */ if (parser->scope) *idk = CP_ID_KIND_QUALIFIED; /* If the name is a template-id that names a type, we will get a TYPE_DECL here. That is invalid code. 
*/ if (TREE_CODE (name) == TYPE_DECL) { error_at (token->location, "invalid use of %qD", name); postfix_expression = error_mark_node; } else { if (name != error_mark_node && !BASELINK_P (name) && parser->scope) { if (TREE_CODE (parser->scope) == NAMESPACE_DECL) { error_at (token->location, "%<%D::%D%> is not a class member", parser->scope, name); postfix_expression = error_mark_node; } else name = build_qualified_name (/*type=*/NULL_TREE, parser->scope, name, template_p); parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; } if (parser->scope && name && BASELINK_P (name)) adjust_result_of_qualified_name_lookup (name, parser->scope, scope); postfix_expression = finish_class_member_access_expr (postfix_expression, name, template_p, tf_warning_or_error); } } /* We no longer need to look up names in the scope of the object on the left-hand side of the `.' or `->' operator. */ parser->context->object_type = NULL_TREE; /* Outside of offsetof, these operators may not appear in constant-expressions. */ if (!for_offsetof && (cp_parser_non_integral_constant_expression (parser, token_type == CPP_DEREF ? NIC_ARROW : NIC_POINT))) postfix_expression = error_mark_node; return postfix_expression; } /* Parse a parenthesized expression-list. expression-list: assignment-expression expression-list, assignment-expression attribute-list: expression-list identifier identifier, expression-list CAST_P is true if this expression is the target of a cast. ALLOW_EXPANSION_P is true if this expression allows expansion of an argument pack. Returns a vector of trees. Each element is a representation of an assignment-expression. NULL is returned if the ( and or ) are missing. An empty, but allocated, vector is returned on no expressions. The parentheses are eaten. 
IS_ATTRIBUTE_LIST is id_attr if we are parsing an attribute list for
   an attribute that wants a plain identifier argument, normal_attr
   for an attribute that wants an expression, or non_attr if we aren't
   parsing an attribute list.  If NON_CONSTANT_P is non-NULL,
   *NON_CONSTANT_P indicates whether or not all of the expressions in
   the list were constant.  */

static VEC(tree,gc) *
cp_parser_parenthesized_expression_list (cp_parser* parser,
					 int is_attribute_list,
					 bool cast_p,
					 bool allow_expansion_p,
					 bool *non_constant_p)
{
  VEC(tree,gc) *expression_list;
  /* Attribute arguments (normal_attr or id_attr) are folded below via
     fold_non_dependent_expr; ordinary expression-lists are not.  */
  bool fold_expr_p = is_attribute_list != non_attr;
  tree identifier = NULL_TREE;
  bool saved_greater_than_is_operator_p;

  /* Assume all the expressions will be constant.  */
  if (non_constant_p)
    *non_constant_p = false;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return NULL;

  expression_list = make_tree_vector ();

  /* Within a parenthesized expression, a `>' token is always
     the greater-than operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = true;

  /* Consume expressions until there are no more.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    while (true)
      {
	tree expr;

	/* At the beginning of attribute lists, check to see if the
	   next token is an identifier.  */
	if (is_attribute_list == id_attr
	    && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME)
	  {
	    cp_token *token;

	    /* Consume the identifier.  */
	    token = cp_lexer_consume_token (parser->lexer);
	    /* Save the identifier.  */
	    identifier = token->u.value;
	  }
	else
	  {
	    bool expr_non_constant_p;

	    /* Parse the next assignment-expression.  */
	    if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	      {
		/* A braced-init-list.  */
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
		expr = cp_parser_braced_list (parser, &expr_non_constant_p);
		if (non_constant_p && expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else if (non_constant_p)
	      {
		expr = (cp_parser_constant_expression
			(parser, /*allow_non_constant_p=*/true,
			 &expr_non_constant_p));
		if (expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else
	      expr = cp_parser_assignment_expression (parser, cast_p, NULL);

	    if (fold_expr_p)
	      expr = fold_non_dependent_expr (expr);

	    /* If we have an ellipsis, then this is an expression
	       expansion.  */
	    if (allow_expansion_p
		&& cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	      {
		/* Consume the `...'.  */
		cp_lexer_consume_token (parser->lexer);

		/* Build the argument pack.  */
		expr = make_pack_expansion (expr);
	      }

	    /* Add it to the list.  We add error_mark_node
	       expressions to the list, so that we can still tell if
	       the correct form for a parenthesized expression-list
	       is found. That gives better errors.  */
	    VEC_safe_push (tree, gc, expression_list, expr);

	    if (expr == error_mark_node)
	      goto skip_comma;
	  }

	/* After the first item, attribute lists look the same as
	   expression lists.  */
	is_attribute_list = non_attr;

      get_comma:;
	/* If the next token isn't a `,', then we are done.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  break;

	/* Otherwise, consume the `,' and keep going.  */
	cp_lexer_consume_token (parser->lexer);
      }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

    skip_comma:;
      /* We try and resync to an unnested comma, as that will give the
	 user better diagnostics.  */
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
      if (!ending)
	{
	  parser->greater_than_is_operator_p
	    = saved_greater_than_is_operator_p;
	  return NULL;
	}
    }

  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;

  /* An attribute's leading identifier, if one was seen, goes first in
     the returned vector.  */
  if (identifier)
    VEC_safe_insert (tree, gc, expression_list, 0, identifier);

  return expression_list;
}

/* Parse a pseudo-destructor-name.

   pseudo-destructor-name:
     :: [opt] nested-name-specifier [opt] type-name :: ~ type-name
     :: [opt] nested-name-specifier template template-id :: ~ type-name
     :: [opt] nested-name-specifier [opt] ~ type-name

   If either of the first two productions is used, sets *SCOPE to the
   TYPE specified before the final `::'.  Otherwise, *SCOPE is set to
   NULL_TREE.  *TYPE is set to the TYPE_DECL for the final type-name,
   or ERROR_MARK_NODE if the parse fails.  */

static void
cp_parser_pseudo_destructor_name (cp_parser* parser,
				  tree* scope,
				  tree* type)
{
  bool nested_name_specifier_p;

  /* Assume that things will not work out.  */
  *type = error_mark_node;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/true);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/true,
					    /*type_p=*/false,
					    /*is_declaration=*/false)
       != NULL_TREE);
  /* Now, if we saw a nested-name-specifier, we might be doing the
     second production.  */
  if (nested_name_specifier_p
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* NOTE(review): this branch never assigns *SCOPE, unlike the
	 other two branches -- presumably callers initialize it;
	 verify against the call sites.  */
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template-id.  */
      cp_parser_template_id (parser,
			     /*template_keyword_p=*/true,
			     /*check_dependency_p=*/false,
			     /*is_declaration=*/true);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  /* If the next token is not a `~', then there might be some
     additional qualification.  */
  else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL))
    {
      /* At this point, we're looking for "type-name :: ~".  The
	 type-name must not be a class-name, since this is a
	 pseudo-destructor.  So, it must be either an enum-name, or a
	 typedef-name -- both of which are just identifiers.  So, we
	 peek ahead to check that the "::" and "~" tokens are present;
	 if they are not, then we can avoid calling type_name.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_NAME
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE
	  || cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL)
	{
	  cp_parser_error (parser, "non-scalar type");
	  return;
	}

      /* Look for the type-name.  */
      *scope = TREE_TYPE (cp_parser_nonclass_name (parser));
      if (*scope == error_mark_node)
	return;

      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  else
    *scope = NULL_TREE;

  /* Look for the `~'.  */
  cp_parser_require (parser, CPP_COMPL, RT_COMPL);

  /* Once we see the ~, this has to be a pseudo-destructor.  */
  if (!processing_template_decl && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Look for the type-name again.  We are not responsible for
     checking that it matches the first type-name.  */
  *type = cp_parser_nonclass_name (parser);
}

/* Parse a unary-expression.

   unary-expression:
     postfix-expression
     ++ cast-expression
     -- cast-expression
     unary-operator cast-expression
     sizeof unary-expression
     sizeof ( type-id )
     alignof ( type-id )  [C++0x]
     new-expression
     delete-expression

   GNU Extensions:

   unary-expression:
     __extension__ cast-expression
     __alignof__ unary-expression
     __alignof__ ( type-id )
     alignof unary-expression  [C++0x]
     __real__ cast-expression
     __imag__ cast-expression
     && identifier

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.
CAST_P is true if this expression is the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
			    cp_id_kind * pidk)
{
  cp_token *token;
  enum tree_code unary_operator;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some keywords give away the kind of expression.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_ALIGNOF:
	case RID_SIZEOF:
	  {
	    tree operand;
	    enum tree_code op;

	    op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR;
	    /* Consume the token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the operand.  */
	    operand = cp_parser_sizeof_operand (parser, keyword);

	    if (TYPE_P (operand))
	      return cxx_sizeof_or_alignof_type (operand, op, true);
	    else
	      {
		/* ISO C++ defines alignof only with types, not with
		   expressions. So pedwarn if alignof is used with a
		   non-type expression. However, __alignof__ is ok.  */
		if (!strcmp (IDENTIFIER_POINTER (token->u.value), "alignof"))
		  pedwarn (token->location, OPT_pedantic,
			   "ISO C++ does not allow %<alignof%> "
			   "with a non-type");

		return cxx_sizeof_or_alignof_expr (operand, op, true);
	      }
	  }

	case RID_NEW:
	  return cp_parser_new_expression (parser);

	case RID_DELETE:
	  return cp_parser_delete_expression (parser);

	case RID_EXTENSION:
	  {
	    /* The saved value of the PEDANTIC flag.  */
	    int saved_pedantic;
	    tree expr;

	    /* Save away the PEDANTIC flag.  */
	    cp_parser_extension_opt (parser, &saved_pedantic);
	    /* Parse the cast-expression.  */
	    expr = cp_parser_simple_cast_expression (parser);
	    /* Restore the PEDANTIC flag.  */
	    pedantic = saved_pedantic;

	    return expr;
	  }

	case RID_REALPART:
	case RID_IMAGPART:
	  {
	    tree expression;

	    /* Consume the `__real__' or `__imag__' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the cast-expression.  */
	    expression = cp_parser_simple_cast_expression (parser);
	    /* Create the complete representation.  */
	    return build_x_unary_op ((keyword == RID_REALPART
				      ? REALPART_EXPR : IMAGPART_EXPR),
				     expression,
				     tf_warning_or_error);
	  }
	  break;

	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return cp_parser_transaction_expression (parser, keyword);

	case RID_NOEXCEPT:
	  {
	    tree expr;
	    const char *saved_message;
	    bool saved_integral_constant_expression_p;
	    bool saved_non_integral_constant_expression_p;
	    bool saved_greater_than_is_operator_p;

	    cp_lexer_consume_token (parser->lexer);
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	    saved_message = parser->type_definition_forbidden_message;
	    parser->type_definition_forbidden_message
	      = G_("types may not be defined in %<noexcept%> expressions");

	    saved_integral_constant_expression_p
	      = parser->integral_constant_expression_p;
	    saved_non_integral_constant_expression_p
	      = parser->non_integral_constant_expression_p;
	    parser->integral_constant_expression_p = false;

	    saved_greater_than_is_operator_p
	      = parser->greater_than_is_operator_p;
	    parser->greater_than_is_operator_p = true;

	    /* The operand of noexcept is an unevaluated operand;
	       suppress evaluation warnings while parsing it.  */
	    ++cp_unevaluated_operand;
	    ++c_inhibit_evaluation_warnings;
	    expr = cp_parser_expression (parser, false, NULL);
	    --c_inhibit_evaluation_warnings;
	    --cp_unevaluated_operand;

	    parser->greater_than_is_operator_p
	      = saved_greater_than_is_operator_p;

	    parser->integral_constant_expression_p
	      = saved_integral_constant_expression_p;
	    parser->non_integral_constant_expression_p
	      = saved_non_integral_constant_expression_p;

	    parser->type_definition_forbidden_message = saved_message;

	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    return finish_noexcept_expr (expr, tf_warning_or_error);
	  }

	default:
	  break;
	}
    }

  /* Look for the `:: new' and `:: delete', which also signal the
     beginning of a new-expression, or delete-expression,
     respectively.  If the next token is `::', then it might be one of
     these.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      enum rid keyword;

      /* See if the token after the `::' is one of the keywords in
	 which we're interested.  */
      keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword;
      /* If it's `new', we have a new-expression.  */
      if (keyword == RID_NEW)
	return cp_parser_new_expression (parser);
      /* Similarly, for `delete'.  */
      else if (keyword == RID_DELETE)
	return cp_parser_delete_expression (parser);
    }

  /* Look for a unary operator.  */
  unary_operator = cp_parser_unary_operator (token);
  /* The `++' and `--' operators can be handled similarly, even though
     they are not technically unary-operators in the grammar.  */
  if (unary_operator == ERROR_MARK)
    {
      if (token->type == CPP_PLUS_PLUS)
	unary_operator = PREINCREMENT_EXPR;
      else if (token->type == CPP_MINUS_MINUS)
	unary_operator = PREDECREMENT_EXPR;
      /* Handle the GNU address-of-label extension.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && token->type == CPP_AND_AND)
	{
	  tree identifier;
	  tree expression;
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

	  /* Consume the '&&' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Look for the identifier.  */
	  identifier = cp_parser_identifier (parser);
	  /* Create an expression representing the address.  */
	  expression = finish_label_address_expr (identifier, loc);
	  if (cp_parser_non_integral_constant_expression (parser,
							  NIC_ADDR_LABEL))
	    expression = error_mark_node;
	  return expression;
	}
    }
  if (unary_operator != ERROR_MARK)
    {
      tree cast_expression;
      tree expression = error_mark_node;
      non_integral_constant non_constant_p = NIC_NONE;

      /* Consume the operator token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* Parse the cast-expression.  */
      cast_expression
	= cp_parser_cast_expression (parser,
				     unary_operator == ADDR_EXPR,
				     /*cast_p=*/false, pidk);
      /* Now, build an appropriate representation.  */
      switch (unary_operator)
	{
	case INDIRECT_REF:
	  non_constant_p = NIC_STAR;
	  expression = build_x_indirect_ref (cast_expression, RO_UNARY_STAR,
					     tf_warning_or_error);
	  break;

	case ADDR_EXPR:
	  non_constant_p = NIC_ADDR;
	  /* Fall through.  */
	case BIT_NOT_EXPR:
	  expression = build_x_unary_op (unary_operator, cast_expression,
					 tf_warning_or_error);
	  break;

	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  non_constant_p = unary_operator == PREINCREMENT_EXPR
			   ? NIC_PREINCREMENT : NIC_PREDECREMENT;
	  /* Fall through.  */
	case UNARY_PLUS_EXPR:
	case NEGATE_EXPR:
	case TRUTH_NOT_EXPR:
	  expression = finish_unary_op_expr (unary_operator, cast_expression);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (non_constant_p != NIC_NONE
	  && cp_parser_non_integral_constant_expression (parser,
							 non_constant_p))
	expression = error_mark_node;

      return expression;
    }

  return cp_parser_postfix_expression (parser, address_p, cast_p,
				       /*member_access_only_p=*/false,
				       pidk);
}

/* Returns ERROR_MARK if TOKEN is not a unary-operator.  If TOKEN is a
   unary-operator, the corresponding tree code is returned.  */

static enum tree_code
cp_parser_unary_operator (cp_token* token)
{
  switch (token->type)
    {
    case CPP_MULT:
      return INDIRECT_REF;

    case CPP_AND:
      return ADDR_EXPR;

    case CPP_PLUS:
      return UNARY_PLUS_EXPR;

    case CPP_MINUS:
      return NEGATE_EXPR;

    case CPP_NOT:
      return TRUTH_NOT_EXPR;

    case CPP_COMPL:
      return BIT_NOT_EXPR;

    default:
      return ERROR_MARK;
    }
}

/* Parse a new-expression.

   new-expression:
     :: [opt] new new-placement [opt] new-type-id new-initializer [opt]
     :: [opt] new new-placement [opt] ( type-id ) new-initializer [opt]

   Returns a representation of the expression.  */

static tree
cp_parser_new_expression (cp_parser* parser)
{
  bool global_scope_p;
  VEC(tree,gc) *placement;
  tree type;
  VEC(tree,gc) *initializer;
  tree nelts;
  tree ret;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `new' operator.  */
  cp_parser_require_keyword (parser, RID_NEW, RT_NEW);
  /* There's no easy way to tell a new-placement from the
     `( type-id )' construct.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a new-placement.
*/
  placement = cp_parser_new_placement (parser);
  /* If that didn't work out, there's no new-placement.  */
  if (!cp_parser_parse_definitely (parser))
    {
      if (placement != NULL)
	release_tree_vector (placement);
      placement = NULL;
    }

  /* If the next token is a `(', then we have a parenthesized
     type-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_token *token;
      const char *saved_message = parser->type_definition_forbidden_message;

      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      parser->type_definition_forbidden_message
	= G_("types may not be defined in a new-expression");
      type = cp_parser_type_id (parser);
      parser->type_definition_forbidden_message = saved_message;

      /* Look for the closing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      token = cp_lexer_peek_token (parser->lexer);
      /* There should not be a direct-new-declarator in this production,
	 but GCC used to allowed this, so we check and emit a sensible
	 error message for this case.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  error_at (token->location,
		    "array bound forbidden after parenthesized type-id");
	  inform (token->location,
		  "try removing the parentheses around the type-id");
	  cp_parser_direct_new_declarator (parser);
	}
      nelts = NULL_TREE;
    }
  /* Otherwise, there must be a new-type-id.  */
  else
    type = cp_parser_new_type_id (parser, &nelts);

  /* If the next token is a `(' or '{', then we have a
     new-initializer.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    initializer = cp_parser_new_initializer (parser);
  else
    initializer = NULL;

  /* A new-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_NEW))
    ret = error_mark_node;
  else
    {
      /* Create a representation of the new-expression.  */
      ret = build_new (&placement, type, nelts, &initializer, global_scope_p,
		       tf_warning_or_error);
    }

  /* Release the argument vectors on every path, including the
     error_mark_node one above.  */
  if (placement != NULL)
    release_tree_vector (placement);
  if (initializer != NULL)
    release_tree_vector (initializer);

  return ret;
}

/* Parse a new-placement.

   new-placement:
     ( expression-list )

   Returns the same representation as for an expression-list.  */

static VEC(tree,gc) *
cp_parser_new_placement (cp_parser* parser)
{
  VEC(tree,gc) *expression_list;

  /* Parse the expression-list.  */
  expression_list = (cp_parser_parenthesized_expression_list
		     (parser, non_attr, /*cast_p=*/false,
		      /*allow_expansion_p=*/true,
		      /*non_constant_p=*/NULL));

  return expression_list;
}

/* Parse a new-type-id.

   new-type-id:
     type-specifier-seq new-declarator [opt]

   Returns the TYPE allocated.  If the new-type-id indicates an array
   type, *NELTS is set to the number of elements in the last array
   bound; the TYPE will not include the last array bound.  */

static tree
cp_parser_new_type_id (cp_parser* parser, tree *nelts)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *new_declarator;
  cp_declarator *declarator;
  cp_declarator *outer_declarator;
  const char *saved_message;
  tree type;

  /* The type-specifier sequence must not contain type definitions.
     (It cannot contain declarations of new types either, but if they
     are not definitions we will catch that because they are not
     complete.)  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in a new-type-id");
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifier_seq);
  /* Restore the old message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Parse the new-declarator.  */
  new_declarator = cp_parser_new_declarator_opt (parser);

  /* Determine the number of elements in the last array dimension, if
     any.  */
  *nelts = NULL_TREE;
  /* Skip down to the last array dimension.  */
  declarator = new_declarator;
  outer_declarator = NULL;
  while (declarator && (declarator->kind == cdk_pointer
			|| declarator->kind == cdk_ptrmem))
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }
  while (declarator
	 && declarator->kind == cdk_array
	 && declarator->declarator
	 && declarator->declarator->kind == cdk_array)
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }

  if (declarator && declarator->kind == cdk_array)
    {
      *nelts = declarator->u.array.bounds;
      /* Error recovery: substitute a bound of 1 for an erroneous
	 bound so parsing can continue.  */
      if (*nelts == error_mark_node)
	*nelts = integer_one_node;

      if (outer_declarator)
	outer_declarator->declarator = declarator->declarator;
      else
	new_declarator = NULL;
    }

  type = groktypename (&type_specifier_seq, new_declarator, false);
  return type;
}

/* Parse an (optional) new-declarator.

   new-declarator:
     ptr-operator new-declarator [opt]
     direct-new-declarator

   Returns the declarator.  */

static cp_declarator *
cp_parser_new_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &type, &cv_quals);
  /* If that worked, look for more new-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator.  */
      declarator = cp_parser_new_declarator_opt (parser);

      return cp_parser_make_indirect_declarator
	(code, type, cv_quals, declarator);
    }

  /* If the next token is a `[', there is a direct-new-declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    return cp_parser_direct_new_declarator (parser);

  return NULL;
}

/* Parse a direct-new-declarator.

   direct-new-declarator:
     [ expression ]
     direct-new-declarator [constant-expression]

   */

static cp_declarator *
cp_parser_direct_new_declarator (cp_parser* parser)
{
  cp_declarator *declarator = NULL;

  while (true)
    {
      tree expression;

      /* Look for the opening `['.  */
      cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

      /* The first expression is not required to be constant.  */
      if (!declarator)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	  /* The standard requires that the expression have integral
	     type.  DR 74 adds enumeration types.  We believe that the
	     real intent is that these expressions be handled like the
	     expression in a `switch' condition, which also allows
	     classes with a single conversion to integral or
	     enumeration type.  */
	  if (!processing_template_decl)
	    {
	      expression
		= build_expr_type_conversion (WANT_INT | WANT_ENUM,
					      expression,
					      /*complain=*/true);
	      if (!expression)
		{
		  error_at (token->location,
			    "expression in new-declarator must have integral "
			    "or enumeration type");
		  expression = error_mark_node;
		}
	    }
	}
      /* But all the other expressions must be.  */
      else
	expression
	  = cp_parser_constant_expression (parser,
					   /*allow_non_constant=*/false,
					   NULL);
      /* Look for the closing `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

      /* Add this bound to the declarator.  */
      declarator = make_array_declarator (declarator, expression);

      /* If the next token is not a `[', then there are no more
	 bounds.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
	break;
    }

  return declarator;
}

/* Parse a new-initializer.

   new-initializer:
     ( expression-list [opt] )
     braced-init-list

   Returns a representation of the expression-list.  */

static VEC(tree,gc) *
cp_parser_new_initializer (cp_parser* parser)
{
  VEC(tree,gc) *expression_list;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      tree t;
      bool expr_non_constant_p;
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      t = cp_parser_braced_list (parser, &expr_non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (t) = 1;
      expression_list = make_tree_vector_single (t);
    }
  else
    expression_list = (cp_parser_parenthesized_expression_list
		       (parser, non_attr, /*cast_p=*/false,
			/*allow_expansion_p=*/true,
			/*non_constant_p=*/NULL));

  return expression_list;
}

/* Parse a delete-expression.

   delete-expression:
     :: [opt] delete cast-expression
     :: [opt] delete [ ] cast-expression

   Returns a representation of the expression.  */

static tree
cp_parser_delete_expression (cp_parser* parser)
{
  bool global_scope_p;
  bool array_p;
  tree expression;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `delete' keyword.  */
  cp_parser_require_keyword (parser, RID_DELETE, RT_DELETE);
  /* See if the array syntax is in use.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    {
      /* Consume the `[' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the `]' token.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
      /* Remember that this is the `[]' construct.  */
      array_p = true;
    }
  else
    array_p = false;

  /* Parse the cast-expression.  */
  expression = cp_parser_simple_cast_expression (parser);

  /* A delete-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_DEL))
    return error_mark_node;

  return delete_sanity (expression, NULL_TREE, array_p, global_scope_p,
			tf_warning_or_error);
}

/* Returns true if TOKEN may start a cast-expression and false
   otherwise.
*/

static bool
cp_parser_tokens_start_cast_expression (cp_parser *parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  switch (token->type)
    {
      /* None of these tokens can begin a cast-expression operand, so
	 seeing one means the parenthesized construct just parsed was
	 not a cast.  */
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_QUERY:
    case CPP_COLON:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_BRACE:
    case CPP_DOT:
    case CPP_DOT_STAR:
    case CPP_DEREF:
    case CPP_DEREF_STAR:
    case CPP_DIV:
    case CPP_MOD:
    case CPP_LSHIFT:
    case CPP_RSHIFT:
    case CPP_LESS:
    case CPP_GREATER:
    case CPP_LESS_EQ:
    case CPP_GREATER_EQ:
    case CPP_EQ_EQ:
    case CPP_NOT_EQ:
    case CPP_EQ:
    case CPP_MULT_EQ:
    case CPP_DIV_EQ:
    case CPP_MOD_EQ:
    case CPP_PLUS_EQ:
    case CPP_MINUS_EQ:
    case CPP_RSHIFT_EQ:
    case CPP_LSHIFT_EQ:
    case CPP_AND_EQ:
    case CPP_XOR_EQ:
    case CPP_OR_EQ:
    case CPP_XOR:
    case CPP_OR:
    case CPP_OR_OR:
    case CPP_EOF:
      return false;

    case CPP_OPEN_PAREN:
      /* In ((type ()) () the last () isn't a valid cast-expression,
	 so the whole must be parsed as postfix-expression.  */
      return cp_lexer_peek_nth_token (parser->lexer, 2)->type
	     != CPP_CLOSE_PAREN;

      /* '[' may start a primary-expression in obj-c++.  */
    case CPP_OPEN_SQUARE:
      return c_dialect_objc ();

    default:
      return true;
    }
}

/* Parse a cast-expression.

   cast-expression:
     unary-expression
     ( type-id ) cast-expression

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.   CAST_P is true if this expression is
   the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p,
			   cp_id_kind * pidk)
{
  /* If it's a `(', then we might be looking at a cast.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;
      tree expr = NULL_TREE;
      bool compound_literal_p;
      const char *saved_message;

      /* There's no way to know yet whether or not this is a cast.
	 For example, `(int (3))' is a unary-expression, while `(int)
	 3' is a cast.  So, we resort to parsing tentatively.  */
      cp_parser_parse_tentatively (parser);
      /* Types may not be defined in a cast.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in casts");
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* A very tricky bit is that `(struct S) { 3 }' is a
	 compound-literal (which we permit in C++ as an extension).
	 But, that construct is not a cast-expression -- it is a
	 postfix-expression.  (The reason is that `(struct S) { 3 }.i'
	 is legal; if the compound-literal were a cast-expression,
	 you'd need an extra set of parentheses.)  But, if we parse
	 the type-id, and it happens to be a class-specifier, then we
	 will commit to the parse at that point, because we cannot
	 undo the action that is done when creating a new class.  So,
	 then we cannot back up and do a postfix-expression.

	 Therefore, we scan ahead to the closing `)', and check to see
	 if the token after the `)' is a `{'.  If so, we are not
	 looking at a cast-expression.

	 Save tokens so that we can put them back.  */
      cp_lexer_save_tokens (parser->lexer);
      /* Skip tokens until the next token is a closing parenthesis.
	 If we find the closing `)', and the next token is a `{', then
	 we are looking at a compound-literal.  */
      compound_literal_p
	= (cp_parser_skip_to_closing_parenthesis (parser, false, false,
						  /*consume_paren=*/true)
	   && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE));
      /* Roll back the tokens we skipped.  */
      cp_lexer_rollback_tokens (parser->lexer);
      /* If we were looking at a compound-literal, simulate an error
	 so that the call to cp_parser_parse_definitely below will
	 fail.  */
      if (compound_literal_p)
	cp_parser_simulate_error (parser);
      else
	{
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;

      /* At this point this can only be either a cast or a
	 parenthesized ctor such as `(T ())' that looks like a cast to
	 function returning T.  */
      if (!cp_parser_error_occurred (parser)
	  && cp_parser_tokens_start_cast_expression (parser))
	{
	  cp_parser_parse_definitely (parser);
	  /* Recurse: the operand of a cast may itself be a cast, as
	     in `(T)(U)x'.  */
	  expr = cp_parser_cast_expression (parser,
					    /*address_p=*/false,
					    /*cast_p=*/true,
					    pidk);

	  /* Warn about old-style casts, if so requested.  */
	  if (warn_old_style_cast
	      && !in_system_header
	      && !VOID_TYPE_P (type)
	      && current_lang_name != lang_name_c)
	    warning (OPT_Wold_style_cast, "use of old-style cast");

	  /* Only type conversions to integral or enumeration types
	     can be used in constant-expressions.  */
	  if (!cast_valid_in_integral_constant_expression_p (type)
	      && cp_parser_non_integral_constant_expression (parser,
							     NIC_CAST))
	    return error_mark_node;

	  /* Perform the cast.  */
	  expr = build_c_cast (input_location, type, expr);
	  return expr;
	}
      else
	cp_parser_abort_tentative_parse (parser);
    }

  /* If we get here, then it's not a cast, so it must be a
     unary-expression.  */
  return cp_parser_unary_expression (parser, address_p, cast_p, pidk);
}

/* Parse a binary expression of the general form:

   pm-expression:
     cast-expression
     pm-expression .* cast-expression
     pm-expression ->* cast-expression

   multiplicative-expression:
     pm-expression
     multiplicative-expression * pm-expression
     multiplicative-expression / pm-expression
     multiplicative-expression % pm-expression

   additive-expression:
     multiplicative-expression
     additive-expression + multiplicative-expression
     additive-expression - multiplicative-expression

   shift-expression:
     additive-expression
     shift-expression << additive-expression
     shift-expression >> additive-expression

   relational-expression:
     shift-expression
     relational-expression < shift-expression
     relational-expression > shift-expression
     relational-expression <= shift-expression
     relational-expression >= shift-expression

  GNU Extension:

   relational-expression:
     relational-expression <? shift-expression
     relational-expression >? shift-expression

   equality-expression:
     relational-expression
     equality-expression == relational-expression
     equality-expression != relational-expression

   and-expression:
     equality-expression
     and-expression & equality-expression

   exclusive-or-expression:
     and-expression
     exclusive-or-expression ^ and-expression

   inclusive-or-expression:
     exclusive-or-expression
     inclusive-or-expression | exclusive-or-expression

   logical-and-expression:
     inclusive-or-expression
     logical-and-expression && inclusive-or-expression

   logical-or-expression:
     logical-and-expression
     logical-or-expression || logical-and-expression

   All these are implemented with a single function like:

   binary-expression:
     simple-cast-expression
     binary-expression <token> binary-expression

   CAST_P is true if this expression is the target of a cast.

   The binops_by_token map is used to get the tree codes for each
   <token> type.  binary-expressions are associated according to a
   precedence table.
*/

/* Compute the precedence of TOKEN for the operator-precedence loop in
   cp_parser_binary_expression below.  A `>' (or `>>' outside C++98)
   that closes a template-argument-list is not an operator in that
   context, so it maps to PREC_NOT_OPERATOR.  */
#define TOKEN_PRECEDENCE(token)				     \
  (((token->type == CPP_GREATER				     \
     || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)) \
    && !parser->greater_than_is_operator_p)		     \
   ? PREC_NOT_OPERATOR					     \
   : binops_by_token[token->type].prec)

static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p,
			     bool no_toplevel_fold_p,
			     enum cp_parser_prec prec,
			     cp_id_kind * pidk)
{
  /* Precedence climbing with an explicit stack of suspended
     subexpressions instead of recursion: pushing an entry "shifts"
     when a higher-precedence operator follows, and the `pop' label
     "reduces" a completed subexpression.  */
  cp_parser_expression_stack stack;
  cp_parser_expression_stack_entry *sp = &stack[0];
  tree lhs, rhs;
  cp_token *token;
  enum tree_code tree_type, lhs_type, rhs_type;
  enum cp_parser_prec new_prec, lookahead_prec;
  tree overload;

  /* Parse the first expression.  */
  lhs = cp_parser_cast_expression (parser, /*address_p=*/false, cast_p, pidk);
  lhs_type = ERROR_MARK;

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  for (;;)
    {
      /* Get an operator token.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (warn_cxx0x_compat
          && token->type == CPP_RSHIFT
          && !parser->greater_than_is_operator_p)
	{
	  if (warning_at (token->location, OPT_Wc__0x_compat,
			  "%<>>%> operator is treated as"
			  " two right angle brackets in C++11"))
	    inform (token->location,
		    "suggest parentheses around %<>>%> expression");
	}

      new_prec = TOKEN_PRECEDENCE (token);

      /* Popping an entry off the stack means we completed a subexpression:
	 - either we found a token which is not an operator
	   (`>' where it is not an operator, or prec == PREC_NOT_OPERATOR),
	   in which case popping will happen repeatedly;
	 - or, we found an operator which has lower priority.  This is the case
	   where the recursive descent *ascends*, as in `3 * 4 + 5' after
	   parsing `3 * 4'.  */
      if (new_prec <= prec)
	{
	  if (sp == stack)
	    break;
	  else
	    goto pop;
	}

     get_rhs:
      tree_type = binops_by_token[token->type].tree_type;

      /* We used the operator token.  */
      cp_lexer_consume_token (parser->lexer);

      /* For "false && x" or "true || x", x will never be executed;
	 disable warnings while evaluating it.  */
      if (tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings += lhs == truthvalue_false_node;
      else if (tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings += lhs == truthvalue_true_node;

      /* Extract another operand.  It may be the RHS of this expression
	 or the LHS of a new, higher priority expression.  */
      rhs = cp_parser_simple_cast_expression (parser);
      rhs_type = ERROR_MARK;

      /* Get another operator token.  Look up its precedence to avoid
	 building a useless (immediately popped) stack entry for common
	 cases such as 3 + 4 + 5 or 3 * 4 + 5.  */
      token = cp_lexer_peek_token (parser->lexer);
      lookahead_prec = TOKEN_PRECEDENCE (token);
      if (lookahead_prec > new_prec)
	{
	  /* ... and prepare to parse the RHS of the new, higher priority
	     expression.  Since precedence levels on the stack are
	     monotonically increasing, we do not have to care about
	     stack overflows.  */
	  sp->prec = prec;
	  sp->tree_type = tree_type;
	  sp->lhs = lhs;
	  sp->lhs_type = lhs_type;
	  sp++;
	  lhs = rhs;
	  lhs_type = rhs_type;
	  prec = new_prec;
	  new_prec = lookahead_prec;
	  goto get_rhs;

	 pop:
	  lookahead_prec = new_prec;
	  /* If the stack is not empty, we have parsed into LHS the right side
	     (`4' in the example above) of an expression we had suspended.
	     We can use the information on the stack to recover the LHS (`3')
	     from the stack together with the tree code (`MULT_EXPR'), and
	     the precedence of the higher level subexpression
	     (`PREC_ADDITIVE_EXPRESSION').  TOKEN is the CPP_PLUS token,
	     which will be used to actually build the additive expression.  */
	  --sp;
	  prec = sp->prec;
	  tree_type = sp->tree_type;
	  rhs = lhs;
	  rhs_type = lhs_type;
	  lhs = sp->lhs;
	  lhs_type = sp->lhs_type;
	}

      /* Undo the disabling of warnings done above.  */
      if (tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings -= lhs == truthvalue_false_node;
      else if (tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings -= lhs == truthvalue_true_node;

      overload = NULL;
      /* ??? Currently we pass lhs_type == ERROR_MARK and rhs_type ==
	 ERROR_MARK for everything that is not a binary expression.
	 This makes warn_about_parentheses miss some warnings that
	 involve unary operators.  For unary expressions we should
	 pass the correct tree_code unless the unary expression was
	 surrounded by parentheses.
      */
      if (no_toplevel_fold_p
	  && lookahead_prec <= prec
	  && sp == stack
	  && TREE_CODE_CLASS (tree_type) == tcc_comparison)
	lhs = build2 (tree_type, boolean_type_node, lhs, rhs);
      else
	lhs = build_x_binary_op (tree_type, lhs, lhs_type, rhs, rhs_type,
				 &overload, tf_warning_or_error);
      lhs_type = tree_type;

      /* If the binary operator required the use of an overloaded operator,
	 then this expression cannot be an integral constant-expression.
	 An overloaded operator can be used even if both operands are
	 otherwise permissible in an integral constant-expression if at
	 least one of the operands is of enumeration type.  */

      if (overload
	  && cp_parser_non_integral_constant_expression (parser,
							 NIC_OVERLOADED))
	return error_mark_node;
    }

  return lhs;
}

/* Parse the `? expression : assignment-expression' part of a
   conditional-expression.  The LOGICAL_OR_EXPR is the
   logical-or-expression that started the conditional-expression.
   Returns a representation of the entire conditional-expression.

   This routine is used by cp_parser_assignment_expression.

     ? expression : assignment-expression

   GNU Extensions:

     ? : assignment-expression */

static tree
cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr)
{
  tree expr;
  tree assignment_expr;
  struct cp_token *token;

  /* Consume the `?' token.  */
  cp_lexer_consume_token (parser->lexer);
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_parser_allow_gnu_extensions_p (parser)
      && token->type == CPP_COLON)
    {
      pedwarn (token->location, OPT_pedantic,
	       "ISO C++ does not allow ?: with omitted middle operand");
      /* Implicit true clause.
*/
      expr = NULL_TREE;
      c_inhibit_evaluation_warnings +=
	logical_or_expr == truthvalue_true_node;
      warn_for_omitted_condop (token->location, logical_or_expr);
    }
  else
    {
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
      parser->colon_corrects_to_scope_p = false;
      /* Parse the expression.  */
      c_inhibit_evaluation_warnings +=
	logical_or_expr == truthvalue_false_node;
      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      c_inhibit_evaluation_warnings +=
	((logical_or_expr == truthvalue_true_node)
	 - (logical_or_expr == truthvalue_false_node));
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
    }

  /* The next token should be a `:'.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);
  /* Parse the assignment-expression.  */
  assignment_expr = cp_parser_assignment_expression (parser, /*cast_p=*/false,
						     NULL);
  /* Both branches above leave the warning-inhibition counter raised by
     exactly one when LOGICAL_OR_EXPR is literally true (the false-arm of
     the conditional can then never execute), so this single decrement
     restores the balance.  */
  c_inhibit_evaluation_warnings -=
    logical_or_expr == truthvalue_true_node;

  /* Build the conditional-expression.  */
  return build_x_conditional_expr (logical_or_expr,
				   expr,
				   assignment_expr,
                                   tf_warning_or_error);
}

/* Parse an assignment-expression.

   assignment-expression:
     conditional-expression
     logical-or-expression assignment-operator assignment_expression
     throw-expression

   CAST_P is true if this expression is the target of a cast.

   Returns a representation for the expression.  */

static tree
cp_parser_assignment_expression (cp_parser* parser, bool cast_p,
				 cp_id_kind * pidk)
{
  tree expr;

  /* If the next token is the `throw' keyword, then we're looking at
     a throw-expression.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW))
    expr = cp_parser_throw_expression (parser);
  /* Otherwise, it must be that we are looking at a
     logical-or-expression.  */
  else
    {
      /* Parse the binary expressions (logical-or-expression).  */
      expr = cp_parser_binary_expression (parser, cast_p, false,
					  PREC_NOT_OPERATOR, pidk);
      /* If the next token is a `?' then we're actually looking at a
	 conditional-expression.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
	return cp_parser_question_colon_clause (parser, expr);
      else
	{
	  enum tree_code assignment_operator;

	  /* If it's an assignment-operator, we're using the second
	     production.  */
	  assignment_operator
	    = cp_parser_assignment_operator_opt (parser);
	  if (assignment_operator != ERROR_MARK)
	    {
	      bool non_constant_p;

	      /* Parse the right-hand side of the assignment.  */
	      tree rhs = cp_parser_initializer_clause (parser, &non_constant_p);

	      if (BRACE_ENCLOSED_INITIALIZER_P (rhs))
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

	      /* An assignment may not appear in a
		 constant-expression.  */
	      if (cp_parser_non_integral_constant_expression (parser,
							      NIC_ASSIGNMENT))
		return error_mark_node;
	      /* Build the assignment expression.  */
	      expr = build_x_modify_expr (expr,
					  assignment_operator,
					  rhs,
					  tf_warning_or_error);
	    }
	}
    }

  return expr;
}

/* Parse an (optional) assignment-operator.

   assignment-operator: one of
     = *= /= %= += -= >>= <<= &= ^= |=

   GNU Extension:

   assignment-operator: one of
     <?= >?=

   If the next token is an assignment operator, the corresponding tree
   code is returned, and the token is consumed.  For example, for
   `+=', PLUS_EXPR is returned.  For `=' itself, the code returned is
   NOP_EXPR.  For `/', TRUNC_DIV_EXPR is returned; for `%',
   TRUNC_MOD_EXPR is returned.  If TOKEN is not an assignment
   operator, ERROR_MARK is returned.  */

static enum tree_code
cp_parser_assignment_operator_opt (cp_parser* parser)
{
  enum tree_code op;
  cp_token *token;

  /* Peek at the next token.
*/
  token = cp_lexer_peek_token (parser->lexer);

  /* Map each compound-assignment token to the tree code of the
     corresponding arithmetic operation; plain `=' maps to NOP_EXPR.  */
  switch (token->type)
    {
    case CPP_EQ:
      op = NOP_EXPR;
      break;

    case CPP_MULT_EQ:
      op = MULT_EXPR;
      break;

    case CPP_DIV_EQ:
      op = TRUNC_DIV_EXPR;
      break;

    case CPP_MOD_EQ:
      op = TRUNC_MOD_EXPR;
      break;

    case CPP_PLUS_EQ:
      op = PLUS_EXPR;
      break;

    case CPP_MINUS_EQ:
      op = MINUS_EXPR;
      break;

    case CPP_RSHIFT_EQ:
      op = RSHIFT_EXPR;
      break;

    case CPP_LSHIFT_EQ:
      op = LSHIFT_EXPR;
      break;

    case CPP_AND_EQ:
      op = BIT_AND_EXPR;
      break;

    case CPP_XOR_EQ:
      op = BIT_XOR_EXPR;
      break;

    case CPP_OR_EQ:
      op = BIT_IOR_EXPR;
      break;

    default:
      /* Nothing else is an assignment operator.  */
      op = ERROR_MARK;
    }

  /* If it was an assignment operator, consume it.  */
  if (op != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);

  return op;
}

/* Parse an expression.

   expression:
     assignment-expression
     expression , assignment-expression

   CAST_P is true if this expression is the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_expression (cp_parser* parser, bool cast_p, cp_id_kind * pidk)
{
  tree expression = NULL_TREE;

  /* The loop builds a left-associated chain of compound-expressions:
     `a, b, c' becomes ((a, b), c).  */
  while (true)
    {
      tree assignment_expression;

      /* Parse the next assignment-expression.  */
      assignment_expression
	= cp_parser_assignment_expression (parser, cast_p, pidk);

      /* If this is the first assignment-expression, we can just
	 save it away.  */
      if (!expression)
	expression = assignment_expression;
      else
	expression = build_x_compound_expr (expression,
					    assignment_expression,
					    tf_warning_or_error);
      /* If the next token is not a comma, then we are done with the
	 expression.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* A comma operator cannot appear in a constant-expression.  */
      if (cp_parser_non_integral_constant_expression (parser, NIC_COMMA))
	expression = error_mark_node;
    }

  return expression;
}

/* Parse a constant-expression.

   constant-expression:
     conditional-expression

  If ALLOW_NON_CONSTANT_P a non-constant expression is silently
  accepted.
If ALLOW_NON_CONSTANT_P is true and the expression is not
  constant, *NON_CONSTANT_P is set to TRUE.  If ALLOW_NON_CONSTANT_P
  is false, NON_CONSTANT_P should be NULL.  */

static tree
cp_parser_constant_expression (cp_parser* parser,
			       bool allow_non_constant_p,
			       bool *non_constant_p)
{
  bool saved_integral_constant_expression_p;
  bool saved_allow_non_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  tree expression;

  /* It might seem that we could simply parse the
     conditional-expression, and then check to see if it were
     TREE_CONSTANT.  However, an expression that is TREE_CONSTANT is
     one that the compiler can figure out is constant, possibly after
     doing some simplifications or optimizations.  The standard has a
     precise definition of constant-expression, and we must honor
     that, even though it is somewhat more restrictive.

     For example:

       int i[(2, 3)];

     is not a legal declaration, because `(2, 3)' is not a
     constant-expression.  The `,' operator is forbidden in a
     constant-expression.  However, GCC's constant-folding machinery
     will fold this operation to an INTEGER_CST for `3'.  */

  /* Save the old settings.  These three parser flags are saved and
     restored around the parse so that nested constant-expression
     contexts do not leak into each other.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_allow_non_integral_constant_expression_p
    = parser->allow_non_integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  /* We are now parsing a constant-expression.  */
  parser->integral_constant_expression_p = true;
  parser->allow_non_integral_constant_expression_p
    = (allow_non_constant_p || cxx_dialect >= cxx0x);
  parser->non_integral_constant_expression_p = false;
  /* Although the grammar says "conditional-expression", we parse an
     "assignment-expression", which also permits "throw-expression"
     and the use of assignment operators.  In the case that
     ALLOW_NON_CONSTANT_P is false, we get better errors than we would
     otherwise.  In the case that ALLOW_NON_CONSTANT_P is true, it is
     actually essential that we look for an assignment-expression.
     For example, cp_parser_initializer_clauses uses this function to
     determine whether a particular assignment-expression is in fact
     constant.  */
  expression
    = cp_parser_assignment_expression (parser, /*cast_p=*/false, NULL);
  /* Restore the old settings.  */
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->allow_non_integral_constant_expression_p
    = saved_allow_non_integral_constant_expression_p;
  if (cxx_dialect >= cxx0x)
    {
      /* Require an rvalue constant expression here; that's what our
	 callers expect.  Reference constant expressions are handled
	 separately in e.g. cp_parser_template_argument.  */
      bool is_const = potential_rvalue_constant_expression (expression);
      parser->non_integral_constant_expression_p = !is_const;
      if (!is_const && !allow_non_constant_p)
	require_potential_rvalue_constant_expression (expression);
    }
  /* Report the flag to the caller before restoring it.  */
  if (allow_non_constant_p)
    *non_constant_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expression;
}

/* Parse __builtin_offsetof.

     offsetof-expression:
       "__builtin_offsetof" "(" type-id "," offsetof-member-designator ")"

     offsetof-member-designator:
       id-expression
       | offsetof-member-designator "." id-expression
       | offsetof-member-designator "[" expression "]"
       | offsetof-member-designator "->" id-expression  */

static tree
cp_parser_builtin_offsetof (cp_parser *parser)
{
  int save_ice_p, save_non_ice_p;
  tree type, expr;
  cp_id_kind dummy;
  cp_token *token;

  /* We're about to accept non-integral-constant things, but will
     definitely yield an integral constant expression.  Save and
     restore these values around our local parsing.  */
  save_ice_p = parser->integral_constant_expression_p;
  save_non_ice_p = parser->non_integral_constant_expression_p;

  /* Consume the "__builtin_offsetof" token.
*/
  cp_lexer_consume_token (parser->lexer);
  /* Consume the opening `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the type-id.  */
  type = cp_parser_type_id (parser);
  /* Look for the `,'.  */
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);
  token = cp_lexer_peek_token (parser->lexer);

  /* Build the (type *)null that begins the traditional offsetof macro.  */
  expr = build_static_cast (build_pointer_type (type), null_pointer_node,
			    tf_warning_or_error);

  /* Parse the offsetof-member-designator.  We begin as if we saw "expr->".  */
  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr,
						 true, &dummy, token->location);
  while (true)
    {
      token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  /* offsetof-member-designator "[" expression "]" */
	  expr = cp_parser_postfix_open_square_expression (parser, expr, true);
	  break;

	case CPP_DEREF:
	  /* offsetof-member-designator "->" identifier */
	  /* Turn the `->' into an implicit `[0].' before falling
	     through to the member-access case.  */
	  expr = grok_array_decl (expr, integer_zero_node);
	  /* FALLTHRU */

	case CPP_DOT:
	  /* offsetof-member-designator "." identifier */
	  cp_lexer_consume_token (parser->lexer);
	  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT,
							 expr, true, &dummy,
							 token->location);
	  break;

	case CPP_CLOSE_PAREN:
	  /* Consume the ")" token.  */
	  cp_lexer_consume_token (parser->lexer);
	  goto success;

	default:
	  /* Error.  We know the following require will fail, but
	     that gives the proper error message.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  cp_parser_skip_to_closing_parenthesis (parser, true, false, true);
	  expr = error_mark_node;
	  goto failure;
	}
    }

 success:
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
  else
    expr = finish_offsetof (expr);

 failure:
  parser->integral_constant_expression_p = save_ice_p;
  parser->non_integral_constant_expression_p = save_non_ice_p;

  return expr;
}

/* Parse a trait expression.

   Returns a representation of the expression, the underlying type
   of the type at issue when KEYWORD is RID_UNDERLYING_TYPE.  */

static tree
cp_parser_trait_expr (cp_parser* parser, enum rid keyword)
{
  cp_trait_kind kind;
  tree type1, type2 = NULL_TREE;
  /* BINARY is set only for the two traits that take a second type-id:
     __is_base_of and __is_convertible_to.  */
  bool binary = false;
  cp_decl_specifier_seq decl_specs;

  switch (keyword)
    {
    case RID_HAS_NOTHROW_ASSIGN:
      kind = CPTK_HAS_NOTHROW_ASSIGN;
      break;
    case RID_HAS_NOTHROW_CONSTRUCTOR:
      kind = CPTK_HAS_NOTHROW_CONSTRUCTOR;
      break;
    case RID_HAS_NOTHROW_COPY:
      kind = CPTK_HAS_NOTHROW_COPY;
      break;
    case RID_HAS_TRIVIAL_ASSIGN:
      kind = CPTK_HAS_TRIVIAL_ASSIGN;
      break;
    case RID_HAS_TRIVIAL_CONSTRUCTOR:
      kind = CPTK_HAS_TRIVIAL_CONSTRUCTOR;
      break;
    case RID_HAS_TRIVIAL_COPY:
      kind = CPTK_HAS_TRIVIAL_COPY;
      break;
    case RID_HAS_TRIVIAL_DESTRUCTOR:
      kind = CPTK_HAS_TRIVIAL_DESTRUCTOR;
      break;
    case RID_HAS_VIRTUAL_DESTRUCTOR:
      kind = CPTK_HAS_VIRTUAL_DESTRUCTOR;
      break;
    case RID_IS_ABSTRACT:
      kind = CPTK_IS_ABSTRACT;
      break;
    case RID_IS_BASE_OF:
      kind = CPTK_IS_BASE_OF;
      binary = true;
      break;
    case RID_IS_CLASS:
      kind = CPTK_IS_CLASS;
      break;
    case RID_IS_CONVERTIBLE_TO:
      kind = CPTK_IS_CONVERTIBLE_TO;
      binary = true;
      break;
    case RID_IS_EMPTY:
      kind = CPTK_IS_EMPTY;
      break;
    case RID_IS_ENUM:
      kind = CPTK_IS_ENUM;
      break;
    case RID_IS_FINAL:
      kind = CPTK_IS_FINAL;
      break;
    case RID_IS_LITERAL_TYPE:
      kind = CPTK_IS_LITERAL_TYPE;
      break;
    case RID_IS_POD:
      kind = CPTK_IS_POD;
      break;
    case RID_IS_POLYMORPHIC:
      kind = CPTK_IS_POLYMORPHIC;
      break;
    case RID_IS_STD_LAYOUT:
      kind = CPTK_IS_STD_LAYOUT;
      break;
    case RID_IS_TRIVIAL:
      kind = CPTK_IS_TRIVIAL;
      break;
    case RID_IS_UNION:
      kind = CPTK_IS_UNION;
      break;
    case RID_UNDERLYING_TYPE:
      kind = CPTK_UNDERLYING_TYPE;
      break;
    case RID_BASES:
      kind = CPTK_BASES;
      break;
    case RID_DIRECT_BASES:
      kind = CPTK_DIRECT_BASES;
      break;
    default:
      gcc_unreachable ();
    }

  /* Consume the token.  */
  cp_lexer_consume_token (parser->lexer);

  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  type1 = cp_parser_type_id (parser);

  if (type1 == error_mark_node)
    return error_mark_node;

  /* Build a trivial decl-specifier-seq.  */
  clear_decl_specs (&decl_specs);
  decl_specs.type = type1;

  /* Call grokdeclarator to figure out what type this is.  */
  type1 = grokdeclarator (NULL, &decl_specs, TYPENAME,
			  /*initialized=*/0, /*attrlist=*/NULL);

  if (binary)
    {
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      type2 = cp_parser_type_id (parser);

      if (type2 == error_mark_node)
	return error_mark_node;

      /* Build a trivial decl-specifier-seq.  */
      clear_decl_specs (&decl_specs);
      decl_specs.type = type2;

      /* Call grokdeclarator to figure out what type this is.  */
      type2 = grokdeclarator (NULL, &decl_specs, TYPENAME,
			      /*initialized=*/0, /*attrlist=*/NULL);
    }

  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* Complete the trait expression, which may mean either processing
     the trait expr now or saving it for template instantiation.  */
  switch(kind)
    {
    case CPTK_UNDERLYING_TYPE:
      return finish_underlying_type (type1);
    case CPTK_BASES:
      return finish_bases (type1, false);
    case CPTK_DIRECT_BASES:
      return finish_bases (type1, true);
    default:
      return finish_trait_expr (kind, type1, type2);
    }
}

/* Lambdas that appear in variable initializer or default argument scope
   get that in their mangling, so we need to record it.  We might as well
   use the count for function and namespace scopes as well.
*/
/* The innermost scope (variable initializer, default argument, or
   function) in which lambdas are currently being parsed, and the
   per-scope discriminator counter used for mangling.  Both are
   GC-visible roots.  */
static GTY(()) tree lambda_scope;
static GTY(()) int lambda_count;
typedef struct GTY(()) tree_int
{
  tree t;
  int i;
} tree_int;
DEF_VEC_O(tree_int);
DEF_VEC_ALLOC_O(tree_int,gc);
/* Stack of saved (lambda_scope, lambda_count) pairs, maintained by
   start_lambda_scope / finish_lambda_scope below.  */
static GTY(()) VEC(tree_int,gc) *lambda_scope_stack;

static void
start_lambda_scope (tree decl)
{
  tree_int ti;
  gcc_assert (decl);
  /* Once we're inside a function, we ignore other scopes and just push
     the function again so that popping works properly.  */
  if (current_function_decl && TREE_CODE (decl) != FUNCTION_DECL)
    decl = current_function_decl;
  ti.t = lambda_scope;
  ti.i = lambda_count;
  VEC_safe_push (tree_int, gc, lambda_scope_stack, &ti);
  if (lambda_scope != decl)
    {
      /* Don't reset the count if we're still in the same function.  */
      lambda_scope = decl;
      lambda_count = 0;
    }
}

static void
record_lambda_scope (tree lambda)
{
  /* Stamp LAMBDA with the current scope and the next discriminator.  */
  LAMBDA_EXPR_EXTRA_SCOPE (lambda) = lambda_scope;
  LAMBDA_EXPR_DISCRIMINATOR (lambda) = lambda_count++;
}

static void
finish_lambda_scope (void)
{
  /* Undo the matching start_lambda_scope: restore the saved scope and
     counter and pop the stack entry.  */
  tree_int *p = VEC_last (tree_int, lambda_scope_stack);
  if (lambda_scope != p->t)
    {
      lambda_scope = p->t;
      lambda_count = p->i;
    }
  VEC_pop (tree_int, lambda_scope_stack);
}

/* Parse a lambda expression.

   lambda-expression:
     lambda-introducer lambda-declarator [opt] compound-statement

   Returns a representation of the expression.  */

static tree
cp_parser_lambda_expression (cp_parser* parser)
{
  tree lambda_expr = build_lambda_expr ();
  tree type;
  bool ok;

  LAMBDA_EXPR_LOCATION (lambda_expr)
    = cp_lexer_peek_token (parser->lexer)->location;

  if (cp_unevaluated_operand)
    error_at (LAMBDA_EXPR_LOCATION (lambda_expr),
	      "lambda-expression in unevaluated context");

  /* We may be in the middle of deferred access check.  Disable
     it now.  */
  push_deferring_access_checks (dk_no_deferred);

  cp_parser_lambda_introducer (parser, lambda_expr);

  type = begin_lambda_type (lambda_expr);
  /* NOTE(review): this early return skips the pop_deferring_access_checks
     call below, leaving the deferring stack unbalanced on this error
     path -- confirm whether that is harmless here.  */
  if (type == error_mark_node)
    return error_mark_node;

  record_lambda_scope (lambda_expr);

  /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set.  */
  determine_visibility (TYPE_NAME (type));

  /* Now that we've started the type, add the capture fields for any
     explicit captures.  */
  register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr));

  {
    /* Inside the class, surrounding template-parameter-lists do not apply.  */
    unsigned int saved_num_template_parameter_lists
        = parser->num_template_parameter_lists;
    unsigned char in_statement = parser->in_statement;
    bool in_switch_statement_p = parser->in_switch_statement_p;

    parser->num_template_parameter_lists = 0;
    parser->in_statement = 0;
    parser->in_switch_statement_p = false;

    /* By virtue of defining a local class, a lambda expression has access to
       the private variables of enclosing classes.  */

    ok = cp_parser_lambda_declarator_opt (parser, lambda_expr);

    if (ok)
      cp_parser_lambda_body (parser, lambda_expr);
    else if (cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      cp_parser_skip_to_end_of_block_or_statement (parser);

    /* The capture list was built up in reverse order; fix that now.  */
    {
      tree newlist = NULL_TREE;
      tree elt, next;

      for (elt = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr);
	   elt; elt = next)
	{
	  next = TREE_CHAIN (elt);
	  TREE_CHAIN (elt) = newlist;
	  newlist = elt;
	}
      LAMBDA_EXPR_CAPTURE_LIST (lambda_expr) = newlist;
    }

    if (ok)
      maybe_add_lambda_conv_op (type);

    type = finish_struct (type, /*attributes=*/NULL_TREE);

    parser->num_template_parameter_lists = saved_num_template_parameter_lists;
    parser->in_statement = in_statement;
    parser->in_switch_statement_p = in_switch_statement_p;
  }

  pop_deferring_access_checks ();

  /* This field is only used during parsing of the lambda.  */
  LAMBDA_EXPR_THIS_CAPTURE (lambda_expr) = NULL_TREE;

  /* This lambda shouldn't have any proxies left at this point.  */
  gcc_assert (LAMBDA_EXPR_PENDING_PROXIES (lambda_expr) == NULL);
  /* And now that we're done, push proxies for an enclosing lambda.
*/
  insert_pending_capture_proxies ();

  if (ok)
    return build_lambda_object (lambda_expr);
  else
    return error_mark_node;
}

/* Parse the beginning of a lambda expression.

   lambda-introducer:
     [ lambda-capture [opt] ]

   LAMBDA_EXPR is the current representation of the lambda expression.  */

static void
cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
{
  /* Need commas after the first capture.  */
  bool first = true;

  /* Eat the leading `['.  */
  cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

  /* Record default capture mode.  "[&" "[=" "[&," "[=,"  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_AND)
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_NAME)
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_REFERENCE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_COPY;

  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE)
    {
      cp_lexer_consume_token (parser->lexer);
      first = false;
    }

  /* Parse the comma-separated captures until the closing `]'.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE))
    {
      cp_token* capture_token;
      tree capture_id;
      tree capture_init_expr;
      cp_id_kind idk = CP_ID_KIND_NONE;
      bool explicit_init_p = false;

      enum capture_kind_type
      {
	BY_COPY,
	BY_REFERENCE
      };
      enum capture_kind_type capture_kind = BY_COPY;

      /* Bail out at end-of-file so an unterminated capture-list cannot
	 loop forever.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
	{
	  error ("expected end of capture-list");
	  return;
	}

      if (first)
	first = false;
      else
	cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      /* Possibly capture `this'.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THIS))
	{
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY)
	    pedwarn (loc, 0, "explicit by-copy capture of %<this%> redundant "
		     "with by-copy capture default");
	  cp_lexer_consume_token (parser->lexer);
	  add_capture (lambda_expr,
		       /*id=*/this_identifier,
		       /*initializer=*/finish_this_expr(),
		       /*by_reference_p=*/false,
		       explicit_init_p);
	  continue;
	}

      /* Remember whether we want to capture as a reference or not.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_AND))
	{
	  capture_kind = BY_REFERENCE;
	  cp_lexer_consume_token (parser->lexer);
	}

      /* Get the identifier.  */
      capture_token = cp_lexer_peek_token (parser->lexer);
      capture_id = cp_parser_identifier (parser);

      if (capture_id == error_mark_node)
	/* Would be nice to have a cp_parser_skip_to_closing_x for general
	   delimiters, but I modified this to stop on unnested ']' as well.  It
	   was already changed to stop on unnested '}', so the
	   "closing_parenthesis" name is no more misleading with my change.  */
	{
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/true);
	  break;
	}

      /* Find the initializer for this capture.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	{
	  /* An explicit expression exists.  */
	  cp_lexer_consume_token (parser->lexer);
          pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow initializers "
		   "in lambda expression capture lists");
	  capture_init_expr = cp_parser_assignment_expression (parser,
							       /*cast_p=*/true,
							       &idk);
	  explicit_init_p = true;
	}
      else
	{
	  const char* error_msg;

	  /* Turn the identifier into an id-expression.  */
	  capture_init_expr
            = cp_parser_lookup_name
                (parser,
		 capture_id,
                 none_type,
                 /*is_template=*/false,
                 /*is_namespace=*/false,
                 /*check_dependency=*/true,
                 /*ambiguous_decls=*/NULL,
                 capture_token->location);

	  if (capture_init_expr == error_mark_node)
	    {
	      unqualified_name_lookup_error (capture_id);
	      continue;
	    }
	  else if (DECL_P (capture_init_expr)
		   && (TREE_CODE (capture_init_expr) != VAR_DECL
		       && TREE_CODE (capture_init_expr) != PARM_DECL))
	    {
	      /* Only variables and parameters can be captured.  */
	      error_at (capture_token->location,
			"capture of non-variable %qD ",
			capture_init_expr);
	      inform (0, "%q+#D declared here", capture_init_expr);
	      continue;
	    }
	  if (TREE_CODE (capture_init_expr) == VAR_DECL
	      && decl_storage_duration (capture_init_expr) != dk_auto)
	    {
	      pedwarn (capture_token->location, 0, "capture of variable "
		       "%qD with non-automatic storage duration",
		       capture_init_expr);
	      inform (0, "%q+#D declared here", capture_init_expr);
	      continue;
	    }

	  capture_init_expr
            = finish_id_expression
                (capture_id,
		 capture_init_expr,
                 parser->scope,
                 &idk,
                 /*integral_constant_expression_p=*/false,
                 /*allow_non_integral_constant_expression_p=*/false,
                 /*non_integral_constant_expression_p=*/NULL,
                 /*template_p=*/false,
                 /*done=*/true,
                 /*address_p=*/false,
                 /*template_arg_p=*/false,
                 &error_msg,
                 capture_token->location);
	}

      /* Warn when an explicit capture merely repeats the capture
	 default.  */
      if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE
	  && !explicit_init_p)
	{
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY
	      && capture_kind == BY_COPY)
	    pedwarn (capture_token->location, 0, "explicit by-copy capture "
		     "of %qD redundant with by-copy capture default",
		     capture_id);
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_REFERENCE
	      && capture_kind == BY_REFERENCE)
	    pedwarn (capture_token->location, 0, "explicit by-reference "
		     "capture of %qD redundant with by-reference capture "
		     "default", capture_id);
	}

      add_capture (lambda_expr,
		   capture_id,
		   capture_init_expr,
		   /*by_reference_p=*/capture_kind == BY_REFERENCE,
		   explicit_init_p);
    }

  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
}

/* Parse the
(optional) middle of a lambda expression.

   lambda-declarator:
     ( parameter-declaration-clause [opt] )
       attribute-specifier [opt]
       mutable [opt]
       exception-specification [opt]
       lambda-return-type-clause [opt]

   LAMBDA_EXPR is the current representation of the lambda expression.  */

static bool
cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
{
  /* 5.1.1.4 of the standard says:
       If a lambda-expression does not include a lambda-declarator, it is as if
       the lambda-declarator were ().
     This means an empty parameter list, no attributes, and no exception
     specification.  */
  tree param_list = void_list_node;
  tree attributes = NULL_TREE;
  tree exception_spec = NULL_TREE;
  tree t;

  /* The lambda-declarator is optional, but must begin with an opening
     parenthesis if present.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      begin_scope (sk_function_parms, /*entity=*/NULL_TREE);

      /* Parse parameters.  */
      param_list = cp_parser_parameter_declaration_clause (parser);

      /* Default arguments shall not be specified in the
	 parameter-declaration-clause of a lambda-declarator.  */
      for (t = param_list; t; t = TREE_CHAIN (t))
	if (TREE_PURPOSE (t))
	  pedwarn (DECL_SOURCE_LOCATION (TREE_VALUE (t)), OPT_pedantic,
		   "default argument specified for lambda parameter");

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      attributes = cp_parser_attributes_opt (parser);

      /* Parse optional `mutable' keyword.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_MUTABLE))
	{
	  cp_lexer_consume_token (parser->lexer);
	  LAMBDA_EXPR_MUTABLE_P (lambda_expr) = 1;
	}

      /* Parse optional exception specification.  */
      exception_spec = cp_parser_exception_specification_opt (parser);

      /* Parse optional trailing return type ("-> type-id", lexed here
	 as a CPP_DEREF token followed by a type-id).  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_DEREF))
        {
          cp_lexer_consume_token (parser->lexer);
          LAMBDA_EXPR_RETURN_TYPE (lambda_expr) = cp_parser_type_id (parser);
        }

      /* The function parameters must be in scope all the way until after the
         trailing-return-type in case of decltype.  */
      for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
	pop_binding (DECL_NAME (t), t);

      leave_scope ();
    }

  /* Create the function call operator.

     Messing with declarators like this is no uglier than building up the
     FUNCTION_DECL by hand, and this is less likely to get out of sync with
     other code.  */
  {
    cp_decl_specifier_seq return_type_specs;
    cp_declarator* declarator;
    tree fco;
    int quals;
    void *p;

    clear_decl_specs (&return_type_specs);
    if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
      return_type_specs.type = LAMBDA_EXPR_RETURN_TYPE (lambda_expr);
    else
      /* Maybe we will deduce the return type later, but we can use void
	 as a placeholder return type anyways.  */
      return_type_specs.type = void_type_node;

    p = obstack_alloc (&declarator_obstack, 0);

    declarator = make_id_declarator (NULL_TREE, ansi_opname (CALL_EXPR),
				     sfk_none);

    /* operator() is const unless the lambda was declared `mutable'.  */
    quals = (LAMBDA_EXPR_MUTABLE_P (lambda_expr)
	     ? TYPE_UNQUALIFIED : TYPE_QUAL_CONST);
    declarator = make_call_declarator (declarator, param_list, quals,
				       VIRT_SPEC_UNSPECIFIED,
                                       exception_spec,
                                       /*late_return_type=*/NULL_TREE);
    declarator->id_loc = LAMBDA_EXPR_LOCATION (lambda_expr);

    fco = grokmethod (&return_type_specs,
		      declarator,
		      attributes);
    if (fco != error_mark_node)
      {
	DECL_INITIALIZED_IN_CLASS_P (fco) = 1;
	DECL_ARTIFICIAL (fco) = 1;
	/* Give the object parameter a different name.  */
	DECL_NAME (DECL_ARGUMENTS (fco)) = get_identifier ("__closure");
      }

    finish_member_declaration (fco);

    obstack_free (&declarator_obstack, p);

    return (fco != error_mark_node);
  }
}

/* Parse the body of a lambda expression, which is simply

     compound-statement

   but which requires special handling.
   LAMBDA_EXPR is the current representation of the lambda expression.
*/

static void
cp_parser_lambda_body (cp_parser* parser, tree lambda_expr)
{
  bool nested = (current_function_decl != NULL_TREE);
  bool local_variables_forbidden_p = parser->local_variables_forbidden_p;

  if (nested)
    push_function_context ();
  else
    /* Still increment function_depth so that we don't GC in the
       middle of an expression.  */
    ++function_depth;

  /* Clear this in case we're in the middle of a default argument.  */
  parser->local_variables_forbidden_p = false;

  /* Finish the function call operator
     - class_specifier
     + late_parsing_for_member
     + function_definition_after_declarator
     + ctor_initializer_opt_and_function_body  */
  {
    tree fco = lambda_function (lambda_expr);
    tree body;
    bool done = false;
    tree compound_stmt;
    tree cap;

    /* Let the front end know that we are going to be defining this
       function.  */
    start_preparsed_function (fco,
			      NULL_TREE,
			      SF_PRE_PARSED | SF_INCLASS_INLINE);

    start_lambda_scope (fco);
    body = begin_function_body ();

    if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      goto out;

    /* Push the proxies for any explicit captures.  */
    for (cap = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr); cap;
	 cap = TREE_CHAIN (cap))
      build_capture_proxy (TREE_PURPOSE (cap));

    compound_stmt = begin_compound_stmt (0);

    /* 5.1.1.4 of the standard says:
         If a lambda-expression does not include a trailing-return-type, it
         is as if the trailing-return-type denotes the following type:
	  * if the compound-statement is of the form
               { return attribute-specifier [opt] expression ; }
             the type of the returned expression after lvalue-to-rvalue
             conversion (_conv.lval_ 4.1), array-to-pointer conversion
             (_conv.array_ 4.2), and function-to-pointer conversion
             (_conv.func_ 4.3);
          * otherwise, void.  */

    /* In a lambda that has neither a lambda-return-type-clause
       nor a deducible form, errors should be reported for return statements
       in the body.  Since we used void as the placeholder return type, parsing
       the body as usual will give such desired behavior.  */
    /* Fast path for the deducible form: a body that starts with
       `return <expr>' is parsed tentatively so the return type can be
       deduced from EXPR; if anything follows the return statement the
       tentative parse is abandoned and the general path below runs.  */
    if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr)
        && cp_lexer_peek_nth_token (parser->lexer, 1)->keyword == RID_RETURN
        && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SEMICOLON)
      {
	tree expr = NULL_TREE;
	cp_id_kind idk = CP_ID_KIND_NONE;

	/* Parse tentatively in case there's more after the initial return
	   statement.  */
	cp_parser_parse_tentatively (parser);

	cp_parser_require_keyword (parser, RID_RETURN, RT_RETURN);

	expr = cp_parser_expression (parser, /*cast_p=*/false, &idk);

	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

	if (cp_parser_parse_definitely (parser))
	  {
	    apply_lambda_return_type (lambda_expr, lambda_return_type (expr));

	    /* Will get error here if type not deduced yet.  */
	    finish_return_stmt (expr);

	    done = true;
	  }
      }

    if (!done)
      {
	if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
	  LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = true;
	while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	  cp_parser_label_declaration (parser);
	cp_parser_statement_seq_opt (parser, NULL_TREE);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
	LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (lambda_expr) = false;
      }

    finish_compound_stmt (compound_stmt);

  out:
    finish_function_body (body);
    finish_lambda_scope ();

    /* Finish the function and generate code for it if necessary.  */
    expand_or_defer_fn (finish_function (/*inline*/2));
  }

  parser->local_variables_forbidden_p = local_variables_forbidden_p;
  if (nested)
    pop_function_context();
  else
    --function_depth;
}

/* Statements [gram.stmt.stmt]  */

/* Parse a statement.

   statement:
     labeled-statement
     expression-statement
     compound-statement
     selection-statement
     iteration-statement
     jump-statement
     declaration-statement
     try-block

  TM Extension:

   statement:
     atomic-statement

  IN_COMPOUND is true when the statement is nested inside a
  cp_parser_compound_statement; this matters for certain pragmas.
  If IF_P is not NULL, *IF_P is set to indicate whether the statement
  is a (possibly labeled) if statement which is not enclosed in braces
  and has an else clause.  This is used to implement -Wparentheses.  */

static void
cp_parser_statement (cp_parser* parser, tree in_statement_expr,
		     bool in_compound, bool *if_p)
{
  tree statement;
  cp_token *token;
  location_t statement_location;

 restart:
  if (if_p != NULL)
    *if_p = false;
  /* There is no statement yet.  */
  statement = NULL_TREE;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Remember the location of the first token in the statement.  */
  statement_location = token->location;
  /* If this is a keyword, then that will often determine what kind of
     statement we have.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_CASE:
	case RID_DEFAULT:
	  /* Looks like a labeled-statement with a case label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;

	case RID_IF:
	case RID_SWITCH:
	  statement = cp_parser_selection_statement (parser, if_p);
	  break;

	case RID_WHILE:
	case RID_DO:
	case RID_FOR:
	  statement = cp_parser_iteration_statement (parser);
	  break;

	case RID_BREAK:
	case RID_CONTINUE:
	case RID_RETURN:
	case RID_GOTO:
	  statement = cp_parser_jump_statement (parser);
	  break;

	  /* Objective-C++ exception-handling constructs.  */
	case RID_AT_TRY:
	case RID_AT_CATCH:
	case RID_AT_FINALLY:
	case RID_AT_SYNCHRONIZED:
	case RID_AT_THROW:
	  statement = cp_parser_objc_statement (parser);
	  break;

	case RID_TRY:
	  statement = cp_parser_try_block (parser);
	  break;

	case RID_NAMESPACE:
	  /* This must be a namespace alias definition.  */
	  cp_parser_declaration_statement (parser);
	  return;

	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  statement = cp_parser_transaction (parser, keyword);
	  break;
	case RID_TRANSACTION_CANCEL:
	  statement = cp_parser_transaction_cancel (parser);
	  break;

	default:
	  /* It might be a keyword like `int' that can start a
	     declaration-statement.  */
	  break;
	}
    }
  else if (token->type == CPP_NAME)
    {
      /* If the next token is a `:', then we are looking at a
	 labeled-statement.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_COLON)
	{
	  /* Looks like a labeled-statement with an ordinary label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;
	}
    }
  /* Anything that starts with a `{' must be a compound-statement.  */
  else if (token->type == CPP_OPEN_BRACE)
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* CPP_PRAGMA is a #pragma inside a function body, which constitutes
     a statement all its own.  */
  else if (token->type == CPP_PRAGMA)
    {
      /* Only certain OpenMP pragmas are attached to statements, and thus
	 are considered statements themselves.  All others are not.  In
	 the context of a compound, accept the pragma as a "statement" and
	 return so that we can check for a close brace.  Otherwise we
	 require a real statement and must go back and read one.  */
      if (in_compound)
	cp_parser_pragma (parser, pragma_compound);
      else if (!cp_parser_pragma (parser, pragma_stmt))
	goto restart;
      return;
    }
  else if (token->type == CPP_EOF)
    {
      cp_parser_error (parser, "expected statement");
      return;
    }

  /* Everything else must be a declaration-statement or an
     expression-statement.  Try for the declaration-statement
     first, unless we are looking at a `;', in which case we know that
     we have an expression-statement.  */
  if (!statement)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  cp_parser_parse_tentatively (parser);
	  /* Try to parse the declaration-statement.  */
	  cp_parser_declaration_statement (parser);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return;
	}
      /* Look for an expression-statement instead.  */
      statement = cp_parser_expression_statement (parser, in_statement_expr);
    }

  /* Set the line number for the statement.  */
  if (statement && STATEMENT_CODE_P (TREE_CODE (statement)))
    SET_EXPR_LOCATION (statement, statement_location);
}

/* Parse the label for a labeled-statement, i.e.

   identifier :
   case constant-expression :
   default :

   GNU Extension:
   case constant-expression ... constant-expression : statement

   When a label is parsed without errors, the label is added to the
   parse tree by the finish_* functions, so this function doesn't
   have to return the label.  */

static void
cp_parser_label_for_labeled_statement (cp_parser* parser)
{
  cp_token *token;
  tree label = NULL_TREE;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* The next token should be an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_KEYWORD)
    {
      cp_parser_error (parser, "expected labeled-statement");
      return;
    }

  /* A label is never a scope qualifier, so `:' must not be corrected
     to `::' while parsing one.  */
  parser->colon_corrects_to_scope_p = false;
  switch (token->keyword)
    {
    case RID_CASE:
      {
	tree expr, expr_hi;
	cp_token *ellipsis;

	/* Consume the `case' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the constant-expression.  */
	expr = cp_parser_constant_expression (parser,
					      /*allow_non_constant_p=*/false,
					      NULL);

	ellipsis = cp_lexer_peek_token (parser->lexer);
	if (ellipsis->type == CPP_ELLIPSIS)
	  {
	    /* Consume the `...' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    expr_hi =
	      cp_parser_constant_expression (parser,
					     /*allow_non_constant_p=*/false,
					     NULL);
	    /* We don't need to emit warnings here, as the common code
	       will do this for us.  */
	  }
	else
	  expr_hi = NULL_TREE;

	if (parser->in_switch_statement_p)
	  finish_case_label (token->location, expr, expr_hi);
	else
	  error_at (token->location,
		    "case label %qE not within a switch statement",
		    expr);
      }
      break;

    case RID_DEFAULT:
      /* Consume the `default' token.  */
      cp_lexer_consume_token (parser->lexer);

      if (parser->in_switch_statement_p)
	finish_case_label (token->location, NULL_TREE, NULL_TREE);
      else
	/* NOTE(review): this diagnostic says "case label" even though the
	   offending token is `default'; consider dedicated wording —
	   confirm against the project's diagnostics conventions.  */
	error_at (token->location,
		  "case label not within a switch statement");
      break;

    default:
      /* Anything else must be an ordinary label.  */
      label = finish_label_stmt (cp_parser_identifier (parser));
      break;
    }

  /* Require the `:' token.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* An ordinary label may optionally be followed by attributes.
     However, this is only permitted if the attributes are then
     followed by a semicolon.  This is because, for backward
     compatibility, when parsing
       lab: __attribute__ ((unused)) int i;
     we want the attribute to attach to "i", not "lab".  */
  if (label != NULL_TREE
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    {
      tree attrs;

      cp_parser_parse_tentatively (parser);
      attrs = cp_parser_attributes_opt (parser);
      if (attrs == NULL_TREE
	  || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_abort_tentative_parse (parser);
      else if (!cp_parser_parse_definitely (parser))
	;
      else
	cplus_decl_attributes (&label, attrs, 0);
    }

  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}

/* Parse an expression-statement.

   expression-statement:
     expression [opt] ;

   Returns the new EXPR_STMT -- or NULL_TREE if the expression
   statement consists of nothing more than an `;'.  IN_STATEMENT_EXPR_P
   indicates whether this expression-statement is part of an
   expression statement.  */

static tree
cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr)
{
  tree statement = NULL_TREE;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a ';', then there is no expression
     statement.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    statement = cp_parser_expression (parser, /*cast_p=*/false, NULL);

  /* Give a helpful message for "A<T>::type t;" and the like.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
      && !cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      if (TREE_CODE (statement) == SCOPE_REF)
	error_at (token->location, "need %<typename%> before %qE because "
		  "%qT is a dependent scope",
		  statement, TREE_OPERAND (statement, 0));
      else if (is_overloaded_fn (statement)
	       && DECL_CONSTRUCTOR_P (get_first_fn (statement)))
	{
	  /* A::A a; */
	  tree fn = get_first_fn (statement);
	  error_at (token->location,
		    "%<%T::%D%> names the constructor, not the type",
		    DECL_CONTEXT (fn), DECL_NAME (fn));
	}
    }

  /* Consume the final `;'.  */
  cp_parser_consume_semicolon_at_end_of_statement (parser);

  if (in_statement_expr
      && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    /* This is the final expression statement of a statement
       expression.  */
    statement = finish_stmt_expr_expr (statement, in_statement_expr);
  else if (statement)
    statement = finish_expr_stmt (statement);
  else
    finish_stmt ();

  return statement;
}

/* Parse a compound-statement.

   compound-statement:
     { statement-seq [opt] }

   GNU extension:

   compound-statement:
     { label-declaration-seq [opt] statement-seq [opt] }

   label-declaration-seq:
     label-declaration
     label-declaration-seq label-declaration

   Returns a tree representing the statement.  */

static tree
cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr,
			      bool in_try, bool function_body)
{
  tree compound_stmt;

  /* Consume the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return error_mark_node;
  /* A constexpr function body must be exactly the function-body
     compound-statement; any nested one is only conditionally valid.  */
  if (DECL_DECLARED_CONSTEXPR_P (current_function_decl)
      && !function_body)
    pedwarn (input_location, OPT_pedantic,
	     "compound-statement in constexpr function");
  /* Begin the compound-statement.  */
  compound_stmt = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0);
  /* If the next keyword is `__label__' we have a label declaration.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
    cp_parser_label_declaration (parser);
  /* Parse an (optional) statement-seq.  */
  cp_parser_statement_seq_opt (parser, in_statement_expr);
  /* Finish the compound-statement.  */
  finish_compound_stmt (compound_stmt);
  /* Consume the `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  return compound_stmt;
}

/* Parse an (optional) statement-seq.

   statement-seq:
     statement
     statement-seq [opt] statement  */

static void
cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr)
{
  /* Scan statements until there aren't any more.  */
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* If we are looking at a `}', then we have run out of
	 statements; the same is true if we have reached the end
	 of file, or have stumbled upon a stray '@end'.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL
	  || (token->type == CPP_KEYWORD && token->keyword == RID_AT_END))
	break;

      /* If we are in a compound statement and find 'else' then
	 something went wrong.  */
      else if (token->type == CPP_KEYWORD && token->keyword == RID_ELSE)
	{
	  if (parser->in_statement & IN_IF_STMT)
	    break;
	  else
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      error_at (token->location, "%<else%> without a previous %<if%>");
	    }
	}

      /* Parse the statement.  */
      cp_parser_statement (parser, in_statement_expr, true, NULL);
    }
}

/* Parse a selection-statement.

   selection-statement:
     if ( condition ) statement
     if ( condition ) statement else statement
     switch ( condition ) statement

   Returns the new IF_STMT or SWITCH_STMT.

   If IF_P is not NULL, *IF_P is set to indicate whether the statement
   is a (possibly labeled) if statement which is not enclosed in
   braces and has an else clause.  This is used to implement
   -Wparentheses.
  */

static tree
cp_parser_selection_statement (cp_parser* parser, bool *if_p)
{
  cp_token *token;
  enum rid keyword;

  if (if_p != NULL)
    *if_p = false;

  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_SELECT);

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_IF:
    case RID_SWITCH:
      {
	tree statement;
	tree condition;

	/* Look for the `('.  */
	if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	  {
	    cp_parser_skip_to_end_of_statement (parser);
	    return error_mark_node;
	  }

	/* Begin the selection-statement.  */
	if (keyword == RID_IF)
	  statement = begin_if_stmt ();
	else
	  statement = begin_switch_stmt ();

	/* Parse the condition.  */
	condition = cp_parser_condition (parser);
	/* Look for the `)'.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	  cp_parser_skip_to_closing_parenthesis (parser, true, false,
						 /*consume_paren=*/true);

	if (keyword == RID_IF)
	  {
	    bool nested_if;
	    unsigned char in_statement;

	    /* Add the condition.  */
	    finish_if_stmt_cond (condition, statement);

	    /* Parse the then-clause.  */
	    in_statement = parser->in_statement;
	    parser->in_statement |= IN_IF_STMT;
	    if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	      {
		/* An empty then-clause: `if (c);'.  Warn unless an else
		   follows, in which case the empty body is deliberate.  */
		location_t loc = cp_lexer_peek_token (parser->lexer)->location;
		add_stmt (build_empty_stmt (loc));
		cp_lexer_consume_token (parser->lexer);
		if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
		  warning_at (loc, OPT_Wempty_body, "suggest braces around "
			      "empty body in an %<if%> statement");
		nested_if = false;
	      }
	    else
	      cp_parser_implicitly_scoped_statement (parser, &nested_if);
	    parser->in_statement = in_statement;

	    finish_then_clause (statement);

	    /* If the next token is `else', parse the else-clause.  */
	    if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
	      {
		/* Consume the `else' keyword.  */
		cp_lexer_consume_token (parser->lexer);
		begin_else_clause (statement);
		/* Parse the else-clause.  */
		if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
		  {
		    location_t loc;
		    loc = cp_lexer_peek_token (parser->lexer)->location;
		    warning_at (loc,
				OPT_Wempty_body, "suggest braces around "
				"empty body in an %<else%> statement");
		    add_stmt (build_empty_stmt (loc));
		    cp_lexer_consume_token (parser->lexer);
		  }
		else
		  cp_parser_implicitly_scoped_statement (parser, NULL);

		finish_else_clause (statement);

		/* If we are currently parsing a then-clause, then
		   IF_P will not be NULL.  We set it to true to
		   indicate that this if statement has an else clause.
		   This may trigger the Wparentheses warning below
		   when we get back up to the parent if statement.  */
		if (if_p != NULL)
		  *if_p = true;
	      }
	    else
	      {
		/* This if statement does not have an else clause.  If
		   NESTED_IF is true, then the then-clause is an if
		   statement which does have an else clause.  We warn
		   about the potential ambiguity.  */
		if (nested_if)
		  warning_at (EXPR_LOCATION (statement), OPT_Wparentheses,
			      "suggest explicit braces to avoid ambiguous"
			      " %<else%>");
	      }

	    /* Now we're all done with the if-statement.  */
	    finish_if_stmt (statement);
	  }
	else
	  {
	    bool in_switch_statement_p;
	    unsigned char in_statement;

	    /* Add the condition.  */
	    finish_switch_cond (condition, statement);

	    /* Parse the body of the switch-statement.  */
	    in_switch_statement_p = parser->in_switch_statement_p;
	    in_statement = parser->in_statement;
	    parser->in_switch_statement_p = true;
	    parser->in_statement |= IN_SWITCH_STMT;
	    cp_parser_implicitly_scoped_statement (parser, NULL);
	    parser->in_switch_statement_p = in_switch_statement_p;
	    parser->in_statement = in_statement;

	    /* Now we're all done with the switch-statement.  */
	    finish_switch_stmt (statement);
	  }

	return statement;
      }
      break;

    default:
      cp_parser_error (parser, "expected selection-statement");
      return error_mark_node;
    }
}

/* Parse a condition.
condition: expression type-specifier-seq declarator = initializer-clause type-specifier-seq declarator braced-init-list GNU Extension: condition: type-specifier-seq declarator asm-specification [opt] attributes [opt] = assignment-expression Returns the expression that should be tested. */ static tree cp_parser_condition (cp_parser* parser) { cp_decl_specifier_seq type_specifiers; const char *saved_message; int declares_class_or_enum; /* Try the declaration first. */ cp_parser_parse_tentatively (parser); /* New types are not allowed in the type-specifier-seq for a condition. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in conditions"); /* Parse the type-specifier-seq. */ cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR, &type_specifiers, &declares_class_or_enum); /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; /* If all is well, we might be looking at a declaration. */ if (!cp_parser_error_occurred (parser)) { tree decl; tree asm_specification; tree attributes; cp_declarator *declarator; tree initializer = NULL_TREE; /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false); /* Parse the attributes. */ attributes = cp_parser_attributes_opt (parser); /* Parse the asm-specification. */ asm_specification = cp_parser_asm_specification_opt (parser); /* If the next token is not an `=' or '{', then we might still be looking at an expression. For example: if (A(a).x) looks like a decl-specifier-seq and a declarator -- but then there is no `=', so this is an expression. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) cp_parser_simulate_error (parser); /* If we did see an `=' or '{', then we are looking at a declaration for sure. 
*/ if (cp_parser_parse_definitely (parser)) { tree pushed_scope; bool non_constant_p; bool flags = LOOKUP_ONLYCONVERTING; /* Create the declaration. */ decl = start_decl (declarator, &type_specifiers, /*initialized_p=*/true, attributes, /*prefix_attributes=*/NULL_TREE, &pushed_scope); /* Parse the initializer. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { initializer = cp_parser_braced_list (parser, &non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (initializer) = 1; flags = 0; } else { /* Consume the `='. */ cp_parser_require (parser, CPP_EQ, RT_EQ); initializer = cp_parser_initializer_clause (parser, &non_constant_p); } if (BRACE_ENCLOSED_INITIALIZER_P (initializer)) maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); /* Process the initializer. */ cp_finish_decl (decl, initializer, !non_constant_p, asm_specification, flags); if (pushed_scope) pop_scope (pushed_scope); return convert_from_reference (decl); } } /* If we didn't even get past the declarator successfully, we are definitely not looking at a declaration. */ else cp_parser_abort_tentative_parse (parser); /* Otherwise, we are looking at an expression. */ return cp_parser_expression (parser, /*cast_p=*/false, NULL); } /* Parses a for-statement or range-for-statement until the closing ')', not included. */ static tree cp_parser_for (cp_parser *parser) { tree init, scope, decl; bool is_range_for; /* Begin the for-statement. */ scope = begin_for_scope (&init); /* Parse the initialization. */ is_range_for = cp_parser_for_init_statement (parser, &decl); if (is_range_for) return cp_parser_range_for (parser, scope, init, decl); else return cp_parser_c_for (parser, scope, init); } static tree cp_parser_c_for (cp_parser *parser, tree scope, tree init) { /* Normal for loop */ tree condition = NULL_TREE; tree expression = NULL_TREE; tree stmt; stmt = begin_for_stmt (scope, init); /* The for-init-statement has already been parsed in cp_parser_for_init_statement, so no work is needed here. 
*/ finish_for_init_stmt (stmt); /* If there's a condition, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) condition = cp_parser_condition (parser); finish_for_cond (condition, stmt); /* Look for the `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); /* If there's an expression, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) expression = cp_parser_expression (parser, /*cast_p=*/false, NULL); finish_for_expr (expression, stmt); return stmt; } /* Tries to parse a range-based for-statement: range-based-for: decl-specifier-seq declarator : expression The decl-specifier-seq declarator and the `:' are already parsed by cp_parser_for_init_statement. If processing_template_decl it returns a newly created RANGE_FOR_STMT; if not, it is converted to a regular FOR_STMT. */ static tree cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl) { tree stmt, range_expr; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool expr_non_constant_p; range_expr = cp_parser_braced_list (parser, &expr_non_constant_p); } else range_expr = cp_parser_expression (parser, /*cast_p=*/false, NULL); /* If in template, STMT is converted to a normal for-statement at instantiation. If not, it is done just ahead. */ if (processing_template_decl) { if (check_for_bare_parameter_packs (range_expr)) range_expr = error_mark_node; stmt = begin_range_for_stmt (scope, init); finish_range_for_decl (stmt, range_decl, range_expr); if (range_expr != error_mark_node && !type_dependent_expression_p (range_expr) /* The length of an array might be dependent. */ && COMPLETE_TYPE_P (TREE_TYPE (range_expr)) /* do_auto_deduction doesn't mess with template init-lists. 
*/ && !BRACE_ENCLOSED_INITIALIZER_P (range_expr)) do_range_for_auto_deduction (range_decl, range_expr); } else { stmt = begin_for_stmt (scope, init); stmt = cp_convert_range_for (stmt, range_decl, range_expr); } return stmt; } /* Subroutine of cp_convert_range_for: given the initializer expression, builds up the range temporary. */ static tree build_range_temp (tree range_expr) { tree range_type, range_temp; /* Find out the type deduced by the declaration `auto &&__range = range_expr'. */ range_type = cp_build_reference_type (make_auto (), true); range_type = do_auto_deduction (range_type, range_expr, type_uses_auto (range_type)); /* Create the __range variable. */ range_temp = build_decl (input_location, VAR_DECL, get_identifier ("__for_range"), range_type); TREE_USED (range_temp) = 1; DECL_ARTIFICIAL (range_temp) = 1; return range_temp; } /* Used by cp_parser_range_for in template context: we aren't going to do a full conversion yet, but we still need to resolve auto in the type of the for-range-declaration if present. This is basically a shortcut version of cp_convert_range_for. */ static void do_range_for_auto_deduction (tree decl, tree range_expr) { tree auto_node = type_uses_auto (TREE_TYPE (decl)); if (auto_node) { tree begin_dummy, end_dummy, range_temp, iter_type, iter_decl; range_temp = convert_from_reference (build_range_temp (range_expr)); iter_type = (cp_parser_perform_range_for_lookup (range_temp, &begin_dummy, &end_dummy)); iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE, iter_type); iter_decl = build_x_indirect_ref (iter_decl, RO_NULL, tf_warning_or_error); TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl), iter_decl, auto_node); } } /* Converts a range-based for-statement into a normal for-statement, as per the definition. 
      for (RANGE_DECL : RANGE_EXPR)
	BLOCK

   should be equivalent to:

      {
	auto &&__range = RANGE_EXPR;
	for (auto __begin = BEGIN_EXPR, __end = END_EXPR;
	      __begin != __end;
	      ++__begin)
	  {
	      RANGE_DECL = *__begin;
	      BLOCK
	  }
      }

   If RANGE_EXPR is an array:
	BEGIN_EXPR = __range
	END_EXPR = __range + ARRAY_SIZE(__range)
   Else if RANGE_EXPR has a member 'begin' or 'end':
	BEGIN_EXPR = __range.begin()
	END_EXPR = __range.end()
   Else:
	BEGIN_EXPR = begin(__range)
	END_EXPR = end(__range);

   If __range has a member 'begin' but not 'end', or vice versa, we must
   still use the second alternative (it will surely fail, however).

   When calling begin()/end() in the third alternative we must use
   argument dependent lookup, but always considering 'std' as an associated
   namespace.  */

tree
cp_convert_range_for (tree statement, tree range_decl, tree range_expr)
{
  tree begin, end;
  tree iter_type, begin_expr, end_expr;
  tree condition, expression;

  if (range_decl == error_mark_node || range_expr == error_mark_node)
    /* If an error happened previously do nothing or else a lot of
       unhelpful errors would be issued.  */
    begin_expr = end_expr = iter_type = error_mark_node;
  else
    {
      tree range_temp = build_range_temp (range_expr);
      pushdecl (range_temp);
      cp_finish_decl (range_temp, range_expr,
		      /*is_constant_init*/false, NULL_TREE,
		      LOOKUP_ONLYCONVERTING);

      range_temp = convert_from_reference (range_temp);
      iter_type = cp_parser_perform_range_for_lookup (range_temp,
						      &begin_expr, &end_expr);
    }

  /* The new for initialization statement.  */
  begin = build_decl (input_location, VAR_DECL,
		      get_identifier ("__for_begin"), iter_type);
  TREE_USED (begin) = 1;
  DECL_ARTIFICIAL (begin) = 1;
  pushdecl (begin);
  cp_finish_decl (begin, begin_expr,
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);

  end = build_decl (input_location, VAR_DECL,
		    get_identifier ("__for_end"), iter_type);
  TREE_USED (end) = 1;
  DECL_ARTIFICIAL (end) = 1;
  pushdecl (end);
  cp_finish_decl (end, end_expr,
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);

  finish_for_init_stmt (statement);

  /* The new for condition: __begin != __end.  */
  condition = build_x_binary_op (NE_EXPR,
				 begin, ERROR_MARK,
				 end, ERROR_MARK,
				 NULL, tf_warning_or_error);
  finish_for_cond (condition, statement);

  /* The new increment expression: ++__begin.  */
  expression = finish_unary_op_expr (PREINCREMENT_EXPR, begin);
  finish_for_expr (expression, statement);

  /* The declaration is initialized with *__begin inside the loop body.  */
  cp_finish_decl (range_decl,
		  build_x_indirect_ref (begin, RO_NULL, tf_warning_or_error),
		  /*is_constant_init*/false, NULL_TREE,
		  LOOKUP_ONLYCONVERTING);

  return statement;
}

/* Solves BEGIN_EXPR and END_EXPR as described in cp_convert_range_for.
   We need to solve both at the same time because the method used
   depends on the existence of members begin or end.
   Returns the type deduced for the iterator expression.  */

static tree
cp_parser_perform_range_for_lookup (tree range, tree *begin, tree *end)
{
  if (error_operand_p (range))
    {
      *begin = *end = error_mark_node;
      return error_mark_node;
    }

  if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (range))))
    {
      error ("range-based %<for%> expression of type %qT "
	     "has incomplete type", TREE_TYPE (range));
      *begin = *end = error_mark_node;
      return error_mark_node;
    }
  if (TREE_CODE (TREE_TYPE (range)) == ARRAY_TYPE)
    {
      /* If RANGE is an array, we will use pointer arithmetic.  */
      *begin = range;
      *end = build_binary_op (input_location, PLUS_EXPR,
			      range,
			      array_type_nelts_top (TREE_TYPE (range)),
			      0);
      return build_pointer_type (TREE_TYPE (TREE_TYPE (range)));
    }
  else
    {
      /* If it is not an array, we must do a bit of magic.  */
      tree id_begin, id_end;
      tree member_begin, member_end;

      *begin = *end = error_mark_node;

      id_begin = get_identifier ("begin");
      id_end = get_identifier ("end");
      member_begin = lookup_member (TREE_TYPE (range), id_begin,
				    /*protect=*/2, /*want_type=*/false,
				    tf_warning_or_error);
      member_end = lookup_member (TREE_TYPE (range), id_end,
				  /*protect=*/2, /*want_type=*/false,
				  tf_warning_or_error);

      if (member_begin != NULL_TREE || member_end != NULL_TREE)
	{
	  /* Use the member functions.  Having only one of the two members
	     is still handled here (with an error) rather than by ADL.  */
	  if (member_begin != NULL_TREE)
	    *begin = cp_parser_range_for_member_function (range, id_begin);
	  else
	    error ("range-based %<for%> expression of type %qT has an "
		   "%<end%> member but not a %<begin%>", TREE_TYPE (range));

	  if (member_end != NULL_TREE)
	    *end = cp_parser_range_for_member_function (range, id_end);
	  else
	    error ("range-based %<for%> expression of type %qT has a "
		   "%<begin%> member but not an %<end%>", TREE_TYPE (range));
	}
      else
	{
	  /* Use global functions with ADL.  */
	  VEC(tree,gc) *vec;
	  vec = make_tree_vector ();

	  VEC_safe_push (tree, gc, vec, range);

	  member_begin = perform_koenig_lookup (id_begin, vec,
						/*include_std=*/true,
						tf_warning_or_error);
	  *begin = finish_call_expr (member_begin, &vec, false, true,
				     tf_warning_or_error);
	  member_end = perform_koenig_lookup (id_end, vec,
					      /*include_std=*/true,
					      tf_warning_or_error);
	  *end = finish_call_expr (member_end, &vec, false, true,
				   tf_warning_or_error);

	  release_tree_vector (vec);
	}

      /* Last common checks.  */
      if (*begin == error_mark_node || *end == error_mark_node)
	{
	  /* If one of the expressions is an error do no more checks.  */
	  *begin = *end = error_mark_node;
	  return error_mark_node;
	}
      else
	{
	  tree iter_type = cv_unqualified (TREE_TYPE (*begin));
	  /* The unqualified type of the __begin and __end temporaries should
	     be the same, as required by the multiple auto declaration.  */
	  if (!same_type_p (iter_type, cv_unqualified (TREE_TYPE (*end))))
	    error ("inconsistent begin/end types in range-based %<for%> "
		   "statement: %qT and %qT",
		   TREE_TYPE (*begin), TREE_TYPE (*end));
	  return iter_type;
	}
    }
}

/* Helper function for cp_parser_perform_range_for_lookup.
   Builds a tree for RANGE.IDENTIFIER().  */

static tree
cp_parser_range_for_member_function (tree range, tree identifier)
{
  tree member, res;
  VEC(tree,gc) *vec;

  member = finish_class_member_access_expr (range, identifier,
					    false, tf_warning_or_error);
  if (member == error_mark_node)
    return error_mark_node;

  vec = make_tree_vector ();
  res = finish_call_expr (member, &vec,
			  /*disallow_virtual=*/false,
			  /*koenig_p=*/false,
			  tf_warning_or_error);
  release_tree_vector (vec);
  return res;
}

/* Parse an iteration-statement.

   iteration-statement:
     while ( condition ) statement
     do statement while ( expression ) ;
     for ( for-init-statement condition [opt] ; expression [opt] )
       statement

   Returns the new WHILE_STMT, DO_STMT, FOR_STMT or RANGE_FOR_STMT.  */

static tree
cp_parser_iteration_statement (cp_parser* parser)
{
  cp_token *token;
  enum rid keyword;
  tree statement;
  unsigned char in_statement;

  /* Peek at the next token.  (RT_INTERATION is the historical (sic)
     spelling of the required-token enumerator.)  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_INTERATION);
  if (!token)
    return error_mark_node;

  /* Remember whether or not we are already within an iteration
     statement.  */
  in_statement = parser->in_statement;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_WHILE:
      {
	tree condition;

	/* Begin the while-statement.  */
	statement = begin_while_stmt ();
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the condition.  */
	condition = cp_parser_condition (parser);
	finish_while_stmt_cond (condition, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Parse the dependent statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;
	/* We're done with the while-statement.  */
	finish_while_stmt (statement);
      }
      break;

    case RID_DO:
      {
	tree expression;

	/* Begin the do-statement.  */
	statement = begin_do_stmt ();
	/* Parse the body of the do-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_implicitly_scoped_statement (parser, NULL);
	parser->in_statement = in_statement;
	finish_do_body (statement);
	/* Look for the `while' keyword.  */
	cp_parser_require_keyword (parser, RID_WHILE, RT_WHILE);
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Parse the expression.  */
	expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	/* We're done with the do-statement.  */
	finish_do_stmt (expression, statement);
	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* Look for the `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_FOR:
      {
	/* Look for the `('.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	statement = cp_parser_for (parser);

	/* Look for the `)'.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	/* Parse the body of the for-statement.  */
	parser->in_statement = IN_ITERATION_STMT;
	cp_parser_already_scoped_statement (parser);
	parser->in_statement = in_statement;

	/* We're done with the for-statement.  */
	finish_for_stmt (statement);
      }
      break;

    default:
      cp_parser_error (parser, "expected iteration-statement");
      statement = error_mark_node;
      break;
    }

  return statement;
}

/* Parse a for-init-statement or the declarator of a range-based-for.
   Returns true if a range-based-for declaration is seen.
   for-init-statement:
     expression-statement
     simple-declaration  */

static bool
cp_parser_for_init_statement (cp_parser* parser, tree *decl)
{
  /* If the next token is a `;', then we have an empty
     expression-statement.  Grammatically, this is also a
     simple-declaration, but an invalid one, because it does not
     declare anything.  Therefore, if we did not handle this case
     specially, we would issue an error message about an invalid
     declaration.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      bool is_range_for = false;
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

      /* A `:' here introduces a range-for, so it must not be corrected
	 to `::'.  */
      parser->colon_corrects_to_scope_p = false;

      /* We're going to speculatively look for a declaration, falling back
	 to an expression, if necessary.  */
      cp_parser_parse_tentatively (parser);
      /* Parse the declaration.  */
      cp_parser_simple_declaration (parser,
				    /*function_definition_allowed_p=*/false,
				    decl);
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* It is a range-for, consume the ':' */
	  cp_lexer_consume_token (parser->lexer);
	  is_range_for = true;
	  if (cxx_dialect < cxx0x)
	    {
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"range-based %<for%> loops are not allowed "
			"in C++98 mode");
	      *decl = error_mark_node;
	    }
	}
      else
	/* The ';' is not consumed yet because we told
	   cp_parser_simple_declaration not to.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

      if (cp_parser_parse_definitely (parser))
	return is_range_for;
      /* If the tentative parse failed, then we shall need to look for an
	 expression-statement.  */
    }
  /* If we are here, it is an expression-statement.  */
  cp_parser_expression_statement (parser, NULL_TREE);
  return false;
}

/* Parse a jump-statement.

   jump-statement:
     break ;
     continue ;
     return expression [opt] ;
     return braced-init-list ;
     goto identifier ;

   GNU extension:

   jump-statement:
     goto * expression ;

   Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or
   GOTO_EXPR.
*/

static tree
cp_parser_jump_statement (cp_parser* parser)
{
  tree statement = error_mark_node;
  cp_token *token;
  enum rid keyword;
  unsigned char in_statement;

  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_JUMP);
  if (!token)
    return error_mark_node;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_BREAK:
      /* An `if' does not capture a `break'; mask it off before
	 classifying the enclosing statement.  */
      in_statement = parser->in_statement & ~IN_IF_STMT;
      switch (in_statement)
	{
	case 0:
	  error_at (token->location, "break statement not within loop or switch");
	  break;
	default:
	  /* NOTE: `default' is deliberately placed before the OpenMP
	     cases; it handles the ordinary loop/switch contexts.  */
	  gcc_assert ((in_statement & IN_SWITCH_STMT)
		      || in_statement == IN_ITERATION_STMT);
	  statement = finish_break_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location, "invalid exit from OpenMP structured block");
	  break;
	case IN_OMP_FOR:
	  error_at (token->location, "break statement used with OpenMP for loop");
	  break;
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_CONTINUE:
      /* Neither a switch nor an if captures a `continue'.  */
      switch (parser->in_statement & ~(IN_SWITCH_STMT | IN_IF_STMT))
	{
	case 0:
	  error_at (token->location, "continue statement not within a loop");
	  break;
	case IN_ITERATION_STMT:
	case IN_OMP_FOR:
	  statement = finish_continue_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error_at (token->location, "invalid exit from OpenMP structured block");
	  break;
	default:
	  gcc_unreachable ();
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_RETURN:
      {
	tree expr;
	bool expr_non_constant_p;

	if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	  {
	    /* C++11 `return { ... };'.  */
	    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	    expr = cp_parser_braced_list (parser, &expr_non_constant_p);
	  }
	else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	  expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
	else
	  /* If the next token is a `;', then there is no
	     expression.  */
	  expr = NULL_TREE;
	/* Build the return-statement.  */
	statement = finish_return_stmt (expr);
	/* Look for the final `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_GOTO:
      /* Create the goto-statement.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_MULT))
	{
	  /* Issue a warning about this use of a GNU extension.  */
	  pedwarn (token->location, OPT_pedantic,
		   "ISO C++ forbids computed gotos");
	  /* Consume the '*' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the dependent expression.  */
	  finish_goto_stmt (cp_parser_expression (parser,
						  /*cast_p=*/false, NULL));
	}
      else
	finish_goto_stmt (cp_parser_identifier (parser));
      /* Look for the final `;'.  */
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    default:
      cp_parser_error (parser, "expected jump-statement");
      break;
    }

  return statement;
}

/* Parse a declaration-statement.

   declaration-statement:
     block-declaration  */

static void
cp_parser_declaration_statement (cp_parser* parser)
{
  void *p;

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* Parse the block-declaration.  */
  cp_parser_block_declaration (parser, /*statement_p=*/true);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);

  /* Finish off the statement.  */
  finish_stmt ();
}

/* Some dependent statements (like `if (cond) statement'), are
   implicitly in their own scope.  In other words, if the statement is
   a single statement (as opposed to a compound-statement), it is
   none-the-less treated as if it were enclosed in braces.  Any
   declarations appearing in the dependent statement are out of scope
   after control passes that point.  This function parses a statement,
   but ensures that is in its own scope, even if it is not a
   compound-statement.

   If IF_P is not NULL, *IF_P is set to indicate whether the statement
   is a (possibly labeled) if statement which is not enclosed in
   braces and has an else clause.  This is used to implement
   -Wparentheses.

   Returns the new statement.
*/

static tree
cp_parser_implicitly_scoped_statement (cp_parser* parser, bool *if_p)
{
  tree statement;

  if (if_p != NULL)
    *if_p = false;

  /* Mark if () ; with a special NOP_EXPR.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      location_t loc = cp_lexer_peek_token (parser->lexer)->location;
      cp_lexer_consume_token (parser->lexer);
      statement = add_stmt (build_empty_stmt (loc));
    }
  /* if a compound is opened, we simply parse the statement directly.  */
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* If the token is not a `{', then we must take special action.  */
  else
    {
      /* Create a compound-statement.  */
      statement = begin_compound_stmt (0);
      /* Parse the dependent-statement.  */
      cp_parser_statement (parser, NULL_TREE, false, if_p);
      /* Finish the dummy compound-statement.  */
      finish_compound_stmt (statement);
    }

  /* Return the statement.  */
  return statement;
}

/* For some dependent statements (like `while (cond) statement'), we
   have already created a scope.  Therefore, even if the dependent
   statement is a compound-statement, we do not want to create another
   scope.  */

static void
cp_parser_already_scoped_statement (cp_parser* parser)
{
  /* If the token is a `{', then we must take special action.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    cp_parser_statement (parser, NULL_TREE, false, NULL);
  else
    {
      /* Avoid calling cp_parser_compound_statement, so that we
	 don't create a new scope.  Do everything else by hand.  */
      cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
      /* If the next keyword is `__label__' we have a label declaration.  */
      while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	cp_parser_label_declaration (parser);
      /* Parse an (optional) statement-seq.  */
      cp_parser_statement_seq_opt (parser, NULL_TREE);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
}

/* Declarations [gram.dcl.dcl] */

/* Parse an optional declaration-sequence.

   declaration-seq:
     declaration
     declaration-seq declaration  */

static void
cp_parser_declaration_seq_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      token = cp_lexer_peek_token (parser->lexer);

      /* Stop at the end of the enclosing construct or of the input.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	break;

      if (token->type == CPP_SEMICOLON)
	{
	  /* A declaration consisting of a single semicolon is
	     invalid.  Allow it unless we're being pedantic.  */
	  cp_lexer_consume_token (parser->lexer);
	  if (!in_system_header)
	    pedwarn (input_location, OPT_pedantic, "extra %<;%>");
	  continue;
	}

      /* If we're entering or exiting a region that's implicitly
	 extern "C", modify the lang context appropriately.  */
      if (!parser->implicit_extern_c && token->implicit_extern_c)
	{
	  push_lang_context (lang_name_c);
	  parser->implicit_extern_c = true;
	}
      else if (parser->implicit_extern_c && !token->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}

      if (token->type == CPP_PRAGMA)
	{
	  /* A top-level declaration can consist solely of a #pragma.
	     A nested declaration cannot, so this is done here and not
	     in cp_parser_declaration.  (A #pragma at block scope is
	     handled in cp_parser_statement.)  */
	  cp_parser_pragma (parser, pragma_external);
	  continue;
	}

      /* Parse the declaration itself.  */
      cp_parser_declaration (parser);
    }
}

/* Parse a declaration.

   declaration:
     block-declaration
     function-definition
     template-declaration
     explicit-instantiation
     explicit-specialization
     linkage-specification
     namespace-definition

   GNU extension:

   declaration:
      __extension__ declaration  */

static void
cp_parser_declaration (cp_parser* parser)
{
  cp_token token1;
  cp_token token2;
  int saved_pedantic;
  void *p;
  tree attributes = NULL_TREE;

  /* Check for the `__extension__' keyword.
*/
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_declaration (parser);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Try to figure out what kind of declaration is present.  */
  token1 = *cp_lexer_peek_token (parser->lexer);

  /* TOKEN1 and TOKEN2 are copied by value: lookahead must not be
     disturbed by the dispatch below.  */
  if (token1.type != CPP_EOF)
    token2 = *cp_lexer_peek_nth_token (parser->lexer, 2);
  else
    {
      token2.type = CPP_EOF;
      token2.keyword = RID_MAX;
    }

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token1.keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal (&token2))
    cp_parser_linkage_specification (parser);
  /* If the next token is `template', then we have either a template
     declaration, an explicit instantiation, or an explicit
     specialization.  */
  else if (token1.keyword == RID_TEMPLATE)
    {
      /* `template <>' indicates a template specialization.  */
      if (token2.type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      /* `template <' indicates a template declaration.  */
      else if (token2.type == CPP_LESS)
	cp_parser_template_declaration (parser, /*member_p=*/false);
      /* Anything else must be an explicit instantiation.  */
      else
	cp_parser_explicit_instantiation (parser);
    }
  /* If the next token is `export', then we have a template
     declaration.  */
  else if (token1.keyword == RID_EXPORT)
    cp_parser_template_declaration (parser, /*member_p=*/false);
  /* If the next token is `extern', 'static' or 'inline' and the one
     after that is `template', we have a GNU extended explicit
     instantiation directive.  */
  else if (cp_parser_allow_gnu_extensions_p (parser)
	   && (token1.keyword == RID_EXTERN
	       || token1.keyword == RID_STATIC
	       || token1.keyword == RID_INLINE)
	   && token2.keyword == RID_TEMPLATE)
    cp_parser_explicit_instantiation (parser);
  /* If the next token is `namespace', check for a named or unnamed
     namespace definition.  */
  else if (token1.keyword == RID_NAMESPACE
	   && (/* A named namespace definition.  */
	       (token2.type == CPP_NAME
		&& (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		    != CPP_EQ))
	       /* An unnamed namespace definition.  */
	       || token2.type == CPP_OPEN_BRACE
	       || token2.keyword == RID_ATTRIBUTE))
    cp_parser_namespace_definition (parser);
  /* An inline (associated) namespace definition.  */
  else if (token1.keyword == RID_INLINE
	   && token2.keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Objective-C++ declaration/definition.  */
  else if (c_dialect_objc () && OBJC_IS_AT_KEYWORD (token1.keyword))
    cp_parser_objc_declaration (parser, NULL_TREE);
  else if (c_dialect_objc ()
	   && token1.keyword == RID_ATTRIBUTE
	   && cp_parser_objc_valid_prefix_attributes (parser, &attributes))
    cp_parser_objc_declaration (parser, attributes);
  /* We must have either a block declaration or a function
     definition.  */
  else
    /* Try to parse a block-declaration, or a function-definition.  */
    cp_parser_block_declaration (parser, /*statement_p=*/false);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);
}

/* Parse a block-declaration.

   block-declaration:
     simple-declaration
     asm-definition
     namespace-alias-definition
     using-declaration
     using-directive

   GNU Extension:

   block-declaration:
     __extension__ block-declaration

   C++0x Extension:

   block-declaration:
     static_assert-declaration

   If STATEMENT_P is TRUE, then this block-declaration is occurring as
   part of a declaration-statement.  */

static void
cp_parser_block_declaration (cp_parser *parser,
			     bool      statement_p)
{
  cp_token *token1;
  int saved_pedantic;

  /* Check for the `__extension__' keyword.
*/
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_block_declaration (parser, statement_p);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Peek at the next token to figure out which kind of declaration is
     present.  */
  token1 = cp_lexer_peek_token (parser->lexer);

  /* If the next keyword is `asm', we have an asm-definition.  */
  if (token1->keyword == RID_ASM)
    {
      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      cp_parser_asm_definition (parser);
    }
  /* If the next keyword is `namespace', we have a
     namespace-alias-definition.  */
  else if (token1->keyword == RID_NAMESPACE)
    cp_parser_namespace_alias_definition (parser);
  /* If the next keyword is `using', we have a using-declaration,
     a using-directive, or an alias-declaration.  */
  else if (token1->keyword == RID_USING)
    {
      cp_token *token2;

      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      /* If the token after `using' is `namespace', then we have a
	 using-directive.  */
      token2 = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token2->keyword == RID_NAMESPACE)
	cp_parser_using_directive (parser);
      /* If the second token after 'using' is '=', then we have an
	 alias-declaration.  */
      else if (cxx_dialect >= cxx0x
	       && token2->type == CPP_NAME
	       && ((cp_lexer_peek_nth_token (parser->lexer, 3)->type
		    == CPP_EQ)
		   || (cp_lexer_peek_nth_token (parser->lexer, 3)->keyword
		       == RID_ATTRIBUTE)))
	cp_parser_alias_declaration (parser);
      /* Otherwise, it's a using-declaration.  */
      else
	cp_parser_using_declaration (parser,
				     /*access_declaration_p=*/false);
    }
  /* If the next keyword is `__label__' we have a misplaced label
     declaration.  */
  else if (token1->keyword == RID_LABEL)
    {
      cp_lexer_consume_token (parser->lexer);
      error_at (token1->location, "%<__label__%> not at the beginning of a block");
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
    }
  /* If the next token is `static_assert' we have a static assertion.  */
  else if (token1->keyword == RID_STATIC_ASSERT)
    cp_parser_static_assert (parser, /*member_p=*/false);
  /* Anything else must be a simple-declaration.  */
  else
    cp_parser_simple_declaration (parser, !statement_p,
				  /*maybe_range_for_decl*/NULL);
}

/* Parse a simple-declaration.

   simple-declaration:
     decl-specifier-seq [opt] init-declarator-list [opt] ;

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a
   function-definition as a simple-declaration.

   If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to
   the parsed declaration if it is an uninitialized single declarator not
   followed by a `;', or to error_mark_node otherwise.  Either way, the
   trailing `;', if present, will not be consumed.  */

static void
cp_parser_simple_declaration (cp_parser* parser,
			      bool function_definition_allowed_p,
			      tree *maybe_range_for_decl)
{
  cp_decl_specifier_seq decl_specifiers;
  int declares_class_or_enum;
  bool saw_declarator;

  if (maybe_range_for_decl)
    *maybe_range_for_decl = NULL_TREE;

  /* Defer access checks until we know what is being declared; the
     checks for names appearing in the decl-specifier-seq should be
     done as if we were in the scope of the thing being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Parse the decl-specifier-seq.  We have to keep track of whether
     or not the decl-specifier-seq declares a named class or
     enumeration type, since that is the only case in which the
     init-declarator-list is allowed to be empty.

     [dcl.dcl]

     In a simple-declaration, the optional init-declarator-list can be
     omitted only when declaring a class or enumeration, that is when
     the decl-specifier-seq contains either a class-specifier, an
     elaborated-type-specifier, or an enum-specifier.
*/
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* We no longer need to defer access checks.  */
  stop_deferring_access_checks ();

  /* In a block scope, a valid declaration must always have a
     decl-specifier-seq.  By not trying to parse declarators, we can
     resolve the declaration/expression ambiguity more quickly.  */
  if (!function_definition_allowed_p
      && !decl_specifiers.any_specifiers_p)
    {
      cp_parser_error (parser, "expected declaration");
      goto done;
    }

  /* If the next two tokens are both identifiers, the code is
     erroneous.  The usual cause of this situation is code like:

       T t;

     where "T" should name a type -- but does not.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* If parsing tentatively, we should commit; we really are
	 looking at a declaration.  */
      cp_parser_commit_to_tentative_parse (parser);
      /* Give up.  */
      goto done;
    }

  /* If we have seen at least one decl-specifier, and the next token
     is not a parenthesis, then we must be looking at a declaration.
     (After "int (" we might be looking at a functional cast.)  */
  if (decl_specifiers.any_specifiers_p
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
      && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Keep going until we hit the `;' at the end of the simple
     declaration.  */
  saw_declarator = false;
  while (cp_lexer_next_token_is_not (parser->lexer,
				     CPP_SEMICOLON))
    {
      cp_token *token;
      bool function_definition_p;
      tree decl;

      if (saw_declarator)
	{
	  /* If we are processing next declarator, coma is expected */
	  token = cp_lexer_peek_token (parser->lexer);
	  gcc_assert (token->type == CPP_COMMA);
	  cp_lexer_consume_token (parser->lexer);
	  if (maybe_range_for_decl)
	    /* More than one declarator: cannot be a range-for.  */
	    *maybe_range_for_decl = error_mark_node;
	}
      else
	saw_declarator = true;

      /* Parse the init-declarator.  */
      decl = cp_parser_init_declarator (parser, &decl_specifiers,
					/*checks=*/NULL,
					function_definition_allowed_p,
					/*member_p=*/false,
					declares_class_or_enum,
					&function_definition_p,
					maybe_range_for_decl);
      /* If an error occurred while parsing tentatively, exit quickly.
	 (That usually happens when in the body of a function; each
	 statement is treated as a declaration-statement until proven
	 otherwise.)  */
      if (cp_parser_error_occurred (parser))
	goto done;
      /* Handle function definitions specially.  */
      if (function_definition_p)
	{
	  /* If the next token is a `,', then we are probably
	     processing something like:

	       void f() {}, *p;

	     which is erroneous.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    {
	      cp_token *token = cp_lexer_peek_token (parser->lexer);
	      error_at (token->location,
			"mixing"
			" declarations and function-definitions is forbidden");
	    }
	  /* Otherwise, we're done with the list of declarators.  */
	  else
	    {
	      pop_deferring_access_checks ();
	      return;
	    }
	}
      if (maybe_range_for_decl && *maybe_range_for_decl == NULL_TREE)
	*maybe_range_for_decl = decl;
      /* The next token should be either a `,' or a `;'.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `,', there are more declarators to come.  */
      if (token->type == CPP_COMMA)
	/* will be consumed next time around */;
      /* If it's a `;', we are done.  */
      else if (token->type == CPP_SEMICOLON || maybe_range_for_decl)
	break;
      /* Anything else is an error.  */
      else
	{
	  /* If we have already issued an error message we don't need
	     to issue another one.  */
	  if (decl != error_mark_node
	      || cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_error (parser, "expected %<,%> or %<;%>");
	  /* Skip tokens until we reach the end of the statement.  */
	  cp_parser_skip_to_end_of_statement (parser);
	  /* If the next token is now a `;', consume it.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  goto done;
	}
      /* After the first time around, a function-definition is not
	 allowed -- even if it was OK at first.  For example:

	   int i, f() {}

	 is not valid.  */
      function_definition_allowed_p = false;
    }

  /* Issue an error message if no declarators are present, and the
     decl-specifier-seq does not itself declare a class or
     enumeration.  */
  if (!saw_declarator)
    {
      if (cp_parser_declares_only_class_p (parser))
	shadow_tag (&decl_specifiers);
      /* Perform any deferred access checks.  */
      perform_deferred_access_checks ();
    }

  /* Consume the `;'.  */
  if (!maybe_range_for_decl)
    cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

 done:
  pop_deferring_access_checks ();
}

/* Parse a decl-specifier-seq.

   decl-specifier-seq:
     decl-specifier-seq [opt] decl-specifier

   decl-specifier:
     storage-class-specifier
     type-specifier
     function-specifier
     friend
     typedef

   GNU Extension:

   decl-specifier:
     attributes

   Set *DECL_SPECS to a representation of the decl-specifier-seq.

   The parser flags FLAGS is used to control type-specifier parsing.

   *DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following
   flags:

     1: one of the decl-specifiers is an elaborated-type-specifier
	(i.e., a type declaration)
     2: one of the decl-specifiers is an enum-specifier or a
	class-specifier (i.e., a type definition)

   */

static void
cp_parser_decl_specifier_seq (cp_parser* parser,
			      cp_parser_flags flags,
			      cp_decl_specifier_seq *decl_specs,
			      int* declares_class_or_enum)
{
  bool constructor_possible_p = !parser->in_declarator_p;
  cp_token *start_token = NULL;

  /* Clear DECL_SPECS.  */
  clear_decl_specs (decl_specs);

  /* Assume no class or enumeration type is declared.  */
  *declares_class_or_enum = 0;

  /* Keep reading specifiers until there are no more to read.  */
  while (true)
    {
      bool constructor_p;
      bool found_decl_spec;
      cp_token *token;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Save the first token of the decl spec list for error
         reporting.  */
      if (!start_token)
	start_token = token;
      /* Handle attributes.  */
      if (token->keyword == RID_ATTRIBUTE)
	{
	  /* Parse the attributes.  */
	  decl_specs->attributes
	    = chainon (decl_specs->attributes,
		       cp_parser_attributes_opt (parser));
	  continue;
	}
      /* Assume we will find a decl-specifier keyword.  */
      found_decl_spec = true;
      /* If the next token is an appropriate keyword, we can simply
	 add it to the list.  */
      switch (token->keyword)
	{
	  /* decl-specifier:
	       friend
	       constexpr */
	case RID_FRIEND:
	  if (!at_class_scope_p ())
	    {
	      error_at (token->location, "%<friend%> used outside of class");
	      /* Purge, rather than consume, so the bogus specifier is
		 not seen again.  */
	      cp_lexer_purge_token (parser->lexer);
	    }
	  else
	    {
	      ++decl_specs->specs[(int) ds_friend];
	      /* Consume the token.  */
	      cp_lexer_consume_token (parser->lexer);
	    }
	  break;

	case RID_CONSTEXPR:
	  ++decl_specs->specs[(int) ds_constexpr];
	  cp_lexer_consume_token (parser->lexer);
	  break;

	  /* function-specifier:
	       inline
	       virtual
	       explicit  */
	case RID_INLINE:
	case RID_VIRTUAL:
	case RID_EXPLICIT:
	  cp_parser_function_specifier_opt (parser, decl_specs);
	  break;

	  /* decl-specifier:
	       typedef  */
	case RID_TYPEDEF:
	  ++decl_specs->specs[(int) ds_typedef];
	  /* Consume the token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* A constructor declarator cannot appear in a typedef.  */
	  constructor_possible_p = false;
	  /* The "typedef" keyword can only occur in a declaration; we
	     may as well commit at this point.  */
	  cp_parser_commit_to_tentative_parse (parser);

          if (decl_specs->storage_class != sc_none)
            decl_specs->conflicting_specifiers_p = true;
	  break;

	  /* storage-class-specifier:
	       auto
	       register
	       static
	       extern
	       mutable

	     GNU Extension:
	       thread  */
	case RID_AUTO:
          if (cxx_dialect == cxx98) 
            {
	      /* Consume the token.  */
	      cp_lexer_consume_token (parser->lexer);

              /* Complain about `auto' as a storage specifier, if
                 we're complaining about C++0x compatibility.  */
              warning_at (token->location, OPT_Wc__0x_compat, "%<auto%>"
			  " changes meaning in C++11; please remove it");

              /* Set the storage class anyway.  */
              cp_parser_set_storage_class (parser, decl_specs, RID_AUTO,
					   token->location);
            }
          else
	    /* C++0x auto type-specifier.  */
	    found_decl_spec = false;
          break;

	case RID_REGISTER:
	case RID_STATIC:
	case RID_EXTERN:
	case RID_MUTABLE:
	  /* Consume the token.  */
	  cp_lexer_consume_token (parser->lexer);
          cp_parser_set_storage_class (parser, decl_specs, token->keyword,
				       token->location);
	  break;
	case RID_THREAD:
	  /* Consume the token.  */
	  cp_lexer_consume_token (parser->lexer);
	  ++decl_specs->specs[(int) ds_thread];
	  break;

	default:
	  /* We did not yet find a decl-specifier yet.  */
	  found_decl_spec = false;
	  break;
	}

      if (found_decl_spec
	  && (flags & CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR)
	  && token->keyword != RID_CONSTEXPR)
	error ("decl-specifier invalid in condition");

      /* Constructors are a special case.  The `S' in `S()' is not a
	 decl-specifier; it is the beginning of the declarator.  */
      constructor_p
	= (!found_decl_spec
	   && constructor_possible_p
	   && (cp_parser_constructor_declarator_p
	       (parser, decl_specs->specs[(int) ds_friend] != 0)));

      /* If we don't have a DECL_SPEC yet, then we must be looking at
	 a type-specifier.  */
      if (!found_decl_spec && !constructor_p)
	{
	  int decl_spec_declares_class_or_enum;
	  bool is_cv_qualifier;
	  tree type_spec;

	  type_spec
	    = cp_parser_type_specifier (parser, flags,
					decl_specs,
					/*is_declaration=*/true,
					&decl_spec_declares_class_or_enum,
					&is_cv_qualifier);
	  *declares_class_or_enum |= decl_spec_declares_class_or_enum;

	  /* If this type-specifier referenced a user-defined type
	     (a typedef, class-name, etc.), then we can't allow any
	     more such type-specifiers henceforth.

	     [dcl.spec]

	     The longest sequence of decl-specifiers that could
	     possibly be a type name is taken as the
	     decl-specifier-seq of a declaration.  The sequence shall
	     be self-consistent as described below.

	     [dcl.type]

	     As a general rule, at most one type-specifier is allowed
	     in the complete decl-specifier-seq of a declaration.  The
	     only exceptions are the following:

	     -- const or volatile can be combined with any other
		type-specifier.

	     -- signed or unsigned can be combined with char, long,
		short, or int.

	     -- ..

	     Example:

	       typedef char* Pc;
	       void g (const int Pc);

	     Here, Pc is *not* part of the decl-specifier seq; it's
	     the declarator.  Therefore, once we see a type-specifier
	     (other than a cv-qualifier), we forbid any additional
	     user-defined types.  We *do* still allow things like `int
	     int' to be considered a decl-specifier-seq, and issue the
	     error message later.  */
	  if (type_spec && !is_cv_qualifier)
	    flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
	  /* A constructor declarator cannot follow a type-specifier.  */
	  if (type_spec)
	    {
	      constructor_possible_p = false;
	      found_decl_spec = true;
	      if (!is_cv_qualifier)
		decl_specs->any_type_specifiers_p = true;
	    }
	}

      /* If we still do not have a DECL_SPEC, then there are no more
	 decl-specifiers.  */
      if (!found_decl_spec)
	break;

      decl_specs->any_specifiers_p = true;
      /* After we see one decl-specifier, further decl-specifiers are
	 always optional.  */
      flags |= CP_PARSER_FLAGS_OPTIONAL;
    }

  cp_parser_check_decl_spec (decl_specs, start_token->location);

  /* Don't allow a friend specifier with a class definition.  */
  if (decl_specs->specs[(int) ds_friend] != 0
      && (*declares_class_or_enum & 2))
    error_at (start_token->location,
	      "class definition may not be declared a friend");
}

/* Parse an (optional) storage-class-specifier.

   storage-class-specifier:
     auto
     register
     static
     extern
     mutable

   GNU Extension:

   storage-class-specifier:
     thread

   Returns an IDENTIFIER_NODE corresponding to the keyword used.  */

static tree
cp_parser_storage_class_specifier_opt (cp_parser* parser)
{
  switch (cp_lexer_peek_token (parser->lexer)->keyword)
    {
    case RID_AUTO:
      if (cxx_dialect != cxx98)
        return NULL_TREE;
      /* Fall through for C++98.
*/
    case RID_REGISTER:
    case RID_STATIC:
    case RID_EXTERN:
    case RID_MUTABLE:
    case RID_THREAD:
      /* Consume the token.  */
      return cp_lexer_consume_token (parser->lexer)->u.value;

    default:
      return NULL_TREE;
    }
}

/* Parse an (optional) function-specifier.

   function-specifier:
     inline
     virtual
     explicit

   Returns an IDENTIFIER_NODE corresponding to the keyword used.
   Updates DECL_SPECS, if it is non-NULL.  */

static tree
cp_parser_function_specifier_opt (cp_parser* parser,
				  cp_decl_specifier_seq *decl_specs)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  switch (token->keyword)
    {
    case RID_INLINE:
      if (decl_specs)
	++decl_specs->specs[(int) ds_inline];
      break;

    case RID_VIRTUAL:
      /* 14.5.2.3 [temp.mem]

	 A member function template shall not be virtual.  */
      if (PROCESSING_REAL_TEMPLATE_DECL_P ())
	error_at (token->location, "templates may not be %<virtual%>");
      else if (decl_specs)
	++decl_specs->specs[(int) ds_virtual];
      break;

    case RID_EXPLICIT:
      if (decl_specs)
	++decl_specs->specs[(int) ds_explicit];
      break;

    default:
      return NULL_TREE;
    }

  /* Consume the token.  */
  return cp_lexer_consume_token (parser->lexer)->u.value;
}

/* Parse a linkage-specification.

   linkage-specification:
     extern string-literal { declaration-seq [opt] }
     extern string-literal declaration  */

static void
cp_parser_linkage_specification (cp_parser* parser)
{
  tree linkage;

  /* Look for the `extern' keyword.  */
  cp_parser_require_keyword (parser, RID_EXTERN, RT_EXTERN);

  /* Look for the string-literal.  */
  linkage = cp_parser_string_literal (parser, false, false);

  /* Transform the literal into an identifier.  If the literal is a
     wide-character string, or contains embedded NULs, then we can't
     handle it as the user wants.  */
  if (strlen (TREE_STRING_POINTER (linkage))
      != (size_t) (TREE_STRING_LENGTH (linkage) - 1))
    {
      cp_parser_error (parser, "invalid linkage-specification");
      /* Assume C++ linkage.  */
      linkage = lang_name_cplusplus;
    }
  else
    linkage = get_identifier (TREE_STRING_POINTER (linkage));

  /* We're now using the new linkage.  */
  push_lang_context (linkage);

  /* If the next token is a `{', then we're using the first
     production.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Consume the `{' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the declarations.  */
      cp_parser_declaration_seq_opt (parser);
      /* Look for the closing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
  /* Otherwise, there's just one declaration.  */
  else
    {
      bool saved_in_unbraced_linkage_specification_p;

      saved_in_unbraced_linkage_specification_p
	= parser->in_unbraced_linkage_specification_p;
      parser->in_unbraced_linkage_specification_p = true;
      cp_parser_declaration (parser);
      parser->in_unbraced_linkage_specification_p
	= saved_in_unbraced_linkage_specification_p;
    }

  /* We're done with the linkage-specification.  */
  pop_lang_context ();
}

/* Parse a static_assert-declaration.

   static_assert-declaration:
     static_assert ( constant-expression , string-literal ) ;

   If MEMBER_P, this static_assert is a class member.  */

static void
cp_parser_static_assert(cp_parser *parser, bool member_p)
{
  tree condition;
  tree message;
  cp_token *token;
  location_t saved_loc;
  bool dummy;

  /* Peek at the `static_assert' token so we can keep track of exactly
     where the static assertion started.  */
  token = cp_lexer_peek_token (parser->lexer);
  saved_loc = token->location;

  /* Look for the `static_assert' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_STATIC_ASSERT,
                                  RT_STATIC_ASSERT))
    return;

  /*  We know we are in a static assertion; commit to any tentative
      parse.  */
  if (cp_parser_parsing_tentatively (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* Parse the `(' starting the static assertion condition.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Parse the constant-expression.  Allow a non-constant expression
     here in order to give better diagnostics in
     finish_static_assert.  */
  condition =
    cp_parser_constant_expression (parser,
                                   /*allow_non_constant_p=*/true,
                                   /*non_constant_p=*/&dummy);

  /* Parse the separating `,'.  */
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);

  /* Parse the string-literal message.  */
  message = cp_parser_string_literal (parser,
                                      /*translate=*/false,
                                      /*wide_ok=*/true);

  /* A `)' completes the static assertion.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser,
                                           /*recovering=*/true,
                                           /*or_comma=*/false,
					   /*consume_paren=*/true);

  /* A semicolon terminates the declaration.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Complete the static assertion, which may mean either processing
     the static assert now or saving it for template instantiation.  */
  finish_static_assert (condition, message, saved_loc, member_p);
}

/* Parse a `decltype' type.  Returns the type.

   simple-type-specifier:
     decltype ( expression )  */

static tree
cp_parser_decltype (cp_parser *parser)
{
  tree expr;
  bool id_expression_or_member_access_p = false;
  const char *saved_message;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  cp_token *id_expr_start_token;
  cp_token *start_token = cp_lexer_peek_token (parser->lexer);

  if (start_token->type == CPP_DECLTYPE)
    {
      /* Already parsed.  */
      cp_lexer_consume_token (parser->lexer);
      return start_token->u.value;
    }

  /* Look for the `decltype' token.  */
  if (!cp_parser_require_keyword (parser, RID_DECLTYPE, RT_DECLTYPE))
    return error_mark_node;

  /* Types cannot be defined in a `decltype' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;

  /* And create the new one.  */
  parser->type_definition_forbidden_message
    = G_("types may not be defined in %<decltype%> expressions");

  /* The restrictions on constant-expressions do not apply inside
     decltype expressions.  */
*/ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; /* Do not actually evaluate the expression. */ ++cp_unevaluated_operand; /* Do not warn about problems with the expression. */ ++c_inhibit_evaluation_warnings; /* Parse the opening `('. */ if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return error_mark_node; /* First, try parsing an id-expression. */ id_expr_start_token = cp_lexer_peek_token (parser->lexer); cp_parser_parse_tentatively (parser); expr = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/false, /*optional_p=*/false); if (!cp_parser_error_occurred (parser) && expr != error_mark_node) { bool non_integral_constant_expression_p = false; tree id_expression = expr; cp_id_kind idk; const char *error_msg; if (TREE_CODE (expr) == IDENTIFIER_NODE) /* Lookup the name we got back from the id-expression. */ expr = cp_parser_lookup_name (parser, expr, none_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, id_expr_start_token->location); if (expr && expr != error_mark_node && TREE_CODE (expr) != TEMPLATE_ID_EXPR && TREE_CODE (expr) != TYPE_DECL && (TREE_CODE (expr) != BIT_NOT_EXPR || !TYPE_P (TREE_OPERAND (expr, 0))) && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) { /* Complete lookup of the id-expression. 
*/ expr = (finish_id_expression (id_expression, expr, parser->scope, &idk, /*integral_constant_expression_p=*/false, /*allow_non_integral_constant_expression_p=*/true, &non_integral_constant_expression_p, /*template_p=*/false, /*done=*/true, /*address_p=*/false, /*template_arg_p=*/false, &error_msg, id_expr_start_token->location)); if (expr == error_mark_node) /* We found an id-expression, but it was something that we should not have found. This is an error, not something we can recover from, so note that we found an id-expression and we'll recover as gracefully as possible. */ id_expression_or_member_access_p = true; } if (expr && expr != error_mark_node && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) /* We have an id-expression. */ id_expression_or_member_access_p = true; } if (!id_expression_or_member_access_p) { /* Abort the id-expression parse. */ cp_parser_abort_tentative_parse (parser); /* Parsing tentatively, again. */ cp_parser_parse_tentatively (parser); /* Parse a class member access. */ expr = cp_parser_postfix_expression (parser, /*address_p=*/false, /*cast_p=*/false, /*member_access_only_p=*/true, NULL); if (expr && expr != error_mark_node && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) /* We have an id-expression. */ id_expression_or_member_access_p = true; } if (id_expression_or_member_access_p) /* We have parsed the complete id-expression or member access. */ cp_parser_parse_definitely (parser); else { bool saved_greater_than_is_operator_p; /* Abort our attempt to parse an id-expression or member access expression. */ cp_parser_abort_tentative_parse (parser); /* Within a parenthesized expression, a `>' token is always the greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; /* Parse a full expression. 
*/ expr = cp_parser_expression (parser, /*cast_p=*/false, NULL); /* The `>' token might be the end of a template-id or template-parameter-list now. */ parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; } /* Go back to evaluating expressions. */ --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; /* Restore the old message and the integral constant expression flags. */ parser->type_definition_forbidden_message = saved_message; parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; /* Parse to the closing `)'. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { cp_parser_skip_to_closing_parenthesis (parser, true, false, /*consume_paren=*/true); return error_mark_node; } expr = finish_decltype_type (expr, id_expression_or_member_access_p, tf_warning_or_error); /* Replace the decltype with a CPP_DECLTYPE so we don't need to parse it again. */ start_token->type = CPP_DECLTYPE; start_token->u.value = expr; start_token->keyword = RID_MAX; cp_lexer_purge_tokens_after (parser->lexer, start_token); return expr; } /* Special member functions [gram.special] */ /* Parse a conversion-function-id. conversion-function-id: operator conversion-type-id Returns an IDENTIFIER_NODE representing the operator. */ static tree cp_parser_conversion_function_id (cp_parser* parser) { tree type; tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; tree pushed_scope = NULL_TREE; /* Look for the `operator' token. */ if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR)) return error_mark_node; /* When we parse the conversion-type-id, the current scope will be reset. However, we need that information in able to look up the conversion function later, so we save it here. 
  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We must enter the scope of the class so that the names of
     entities declared within the class are available in the
     conversion-type-id.  For example, consider:

       struct S {
	 typedef int I;
	 operator I();
       };

       S::operator I() { ... }

     In order to see that `I' is a type-name in the definition, we
     must be in the scope of `S'.  */
  if (saved_scope)
    pushed_scope = push_scope (saved_scope);
  /* Parse the conversion-type-id.  */
  type = cp_parser_conversion_type_id (parser);
  /* Leave the scope of the class, if any.  */
  if (pushed_scope)
    pop_scope (pushed_scope);
  /* Restore the saved scope.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  /* If the TYPE is invalid, indicate failure.  */
  if (type == error_mark_node)
    return error_mark_node;
  return mangle_conv_op_name_for_type (type);
}

/* Parse a conversion-type-id:

   conversion-type-id:
     type-specifier-seq conversion-declarator [opt]

   Returns the TYPE specified.  */

static tree
cp_parser_conversion_type_id (cp_parser* parser)
{
  tree attributes;
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  tree type_specified;

  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);
  /* Parse the type-specifiers.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifiers);
  /* If that didn't work, stop.  */
  if (type_specifiers.type == error_mark_node)
    return error_mark_node;
  /* Parse the conversion-declarator.  */
  declarator = cp_parser_conversion_declarator_opt (parser);

  type_specified = grokdeclarator (declarator, &type_specifiers, TYPENAME,
				   /*initialized=*/0, &attributes);
  if (attributes)
    cplus_decl_attributes (&type_specified, attributes, /*flags=*/0);

  /* Don't give this error when parsing tentatively.  This happens to
     work because we always parse this definitively once.  */
  if (! cp_parser_uncommitted_to_tentative_parse_p (parser)
      && type_uses_auto (type_specified))
    {
      error ("invalid use of %<auto%> in conversion operator");
      return error_mark_node;
    }

  return type_specified;
}

/* Parse an (optional) conversion-declarator.

   conversion-declarator:
     ptr-operator conversion-declarator [opt]

   Returns the declarator chain, or NULL if no ptr-operator is
   present.  */

static cp_declarator *
cp_parser_conversion_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree class_type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Try the ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &class_type, &cv_quals);
  /* If it worked, look for more conversion-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator, recursively.  */
      declarator = cp_parser_conversion_declarator_opt (parser);

      return cp_parser_make_indirect_declarator (code, class_type,
						 cv_quals, declarator);
    }

  return NULL;
}

/* Parse an (optional) ctor-initializer.

   ctor-initializer:
     : mem-initializer-list

   Returns TRUE iff the ctor-initializer was actually present.  */

static bool
cp_parser_ctor_initializer_opt (cp_parser* parser)
{
  /* If the next token is not a `:', then there is no
     ctor-initializer.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
    {
      /* Do default initialization of any bases and members.  */
      if (DECL_CONSTRUCTOR_P (current_function_decl))
	finish_mem_initializers (NULL_TREE);

      return false;
    }

  /* Consume the `:' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* And the mem-initializer-list.  */
  cp_parser_mem_initializer_list (parser);

  return true;
}

/* Parse a mem-initializer-list.

   mem-initializer-list:
     mem-initializer ... [opt]
     mem-initializer ...
[opt] , mem-initializer-list  */

static void
cp_parser_mem_initializer_list (cp_parser* parser)
{
  tree mem_initializer_list = NULL_TREE;
  /* error_mark_node doubles as the sentinel for "no delegating target
     constructor seen yet".  */
  tree target_ctor = error_mark_node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Let the semantic analysis code know that we are starting the
     mem-initializer-list.  */
  if (!DECL_CONSTRUCTOR_P (current_function_decl))
    error_at (token->location,
	      "only constructors take member initializers");

  /* Loop through the list.  */
  while (true)
    {
      tree mem_initializer;

      token = cp_lexer_peek_token (parser->lexer);
      /* Parse the mem-initializer.  */
      mem_initializer = cp_parser_mem_initializer (parser);
      /* If the next token is a `...', we're expanding member initializers. */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
        {
          /* Consume the `...'. */
          cp_lexer_consume_token (parser->lexer);

          /* The TREE_PURPOSE must be a _TYPE, because base-specifiers
             can be expanded but members cannot. */
          if (mem_initializer != error_mark_node
              && !TYPE_P (TREE_PURPOSE (mem_initializer)))
            {
              error_at (token->location,
			"cannot expand initializer for member %<%D%>",
			TREE_PURPOSE (mem_initializer));
              mem_initializer = error_mark_node;
            }

          /* Construct the pack expansion type. */
          if (mem_initializer != error_mark_node)
            mem_initializer = make_pack_expansion (mem_initializer);
        }
      /* Once a delegating target constructor has been seen, no other
	 mem-initializer may follow it.  */
      if (target_ctor != error_mark_node
	  && mem_initializer != error_mark_node)
	{
	  error ("mem-initializer for %qD follows constructor delegation",
		 TREE_PURPOSE (mem_initializer));
	  mem_initializer = error_mark_node;
	}
      /* Look for a target constructor. */
      if (mem_initializer != error_mark_node
	  && TYPE_P (TREE_PURPOSE (mem_initializer))
	  && same_type_p (TREE_PURPOSE (mem_initializer), current_class_type))
	{
	  maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS);
	  if (mem_initializer_list)
	    {
	      error ("constructor delegation follows mem-initializer for %qD",
		     TREE_PURPOSE (mem_initializer_list));
	      mem_initializer = error_mark_node;
	    }
	  target_ctor = mem_initializer;
	}
      /* Add it to the list, unless it was erroneous.  */
      if (mem_initializer != error_mark_node)
	{
	  TREE_CHAIN (mem_initializer) = mem_initializer_list;
	  mem_initializer_list = mem_initializer;
	}
      /* If the next token is not a `,', we're done.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* Perform semantic analysis.  Note the list was built in reverse
     source order above.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (mem_initializer_list);
}

/* Parse a mem-initializer.

   mem-initializer:
     mem-initializer-id ( expression-list [opt] )
     mem-initializer-id braced-init-list

   GNU extension:

   mem-initializer:
     ( expression-list [opt] )

   Returns a TREE_LIST.  The TREE_PURPOSE is the TYPE (for a base
   class) or FIELD_DECL (for a non-static data member) to initialize;
   the TREE_VALUE is the expression-list.  An empty initialization
   list is represented by void_list_node.  */

static tree
cp_parser_mem_initializer (cp_parser* parser)
{
  tree mem_initializer_id;
  tree expression_list;
  tree member;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Find out what is being initialized.
  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      permerror (token->location,
		 "anachronistic old-style base class initializer");
      mem_initializer_id = NULL_TREE;
    }
  else
    {
      mem_initializer_id = cp_parser_mem_initializer_id (parser);
      if (mem_initializer_id == error_mark_node)
	return mem_initializer_id;
    }
  member = expand_member_init (mem_initializer_id);
  if (member && !DECL_P (member))
    in_base_initializer = 1;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool expr_non_constant_p;
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &expr_non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      expression_list = build_tree_list (NULL_TREE, expression_list);
    }
  else
    {
      VEC(tree,gc)* vec;
      vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/false,
						     /*allow_expansion_p=*/true,
						     /*non_constant_p=*/NULL);
      if (vec == NULL)
	return error_mark_node;
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }

  if (expression_list == error_mark_node)
    return error_mark_node;
  if (!expression_list)
    /* NOTE(review): the function comment says an empty initialization
       list is represented by void_list_node, but the code uses
       void_type_node here -- confirm which one downstream consumers
       actually test for.  */
    expression_list = void_type_node;

  in_base_initializer = 0;

  return member ? build_tree_list (member, expression_list) : error_mark_node;
}

/* Parse a mem-initializer-id.

   mem-initializer-id:
     :: [opt] nested-name-specifier [opt] class-name
     identifier

   Returns a TYPE indicating the class to be initializer for the first
   production.  Returns an IDENTIFIER_NODE indicating the data member
   to be initialized for the second production.  */

static tree
cp_parser_mem_initializer_id (cp_parser* parser)
{
  bool global_scope_p;
  bool nested_name_specifier_p;
  bool template_p = false;
  tree id;

  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* `typename' is not allowed in this context ([temp.res]).  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      error_at (token->location,
		"keyword %<typename%> not allowed in this context (a qualified "
		"member initializer is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  The simplest way to
     implement:

       [temp.res]

       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.

     is to assume that we have seen the `typename' keyword at this
     point.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    /*is_declaration=*/true)
       != NULL_TREE);
  if (nested_name_specifier_p)
    template_p = cp_parser_optional_template_keyword (parser);
  /* If there is a `::' operator or a nested-name-specifier, then we
     are definitely looking for a class-name.  */
  if (global_scope_p || nested_name_specifier_p)
    return cp_parser_class_name (parser,
				 /*typename_keyword_p=*/true,
				 /*template_keyword_p=*/template_p,
				 typename_type,
				 /*check_dependency_p=*/true,
				 /*class_head_p=*/false,
				 /*is_declaration=*/true);
  /* Otherwise, we could also be looking for an ordinary identifier.  */
  cp_parser_parse_tentatively (parser);
  /* Try a class-name.  */
  id = cp_parser_class_name (parser,
			     /*typename_keyword_p=*/true,
			     /*template_keyword_p=*/false,
			     none_type,
			     /*check_dependency_p=*/true,
			     /*class_head_p=*/false,
			     /*is_declaration=*/true);
  /* If we found one, we're done.  */
  if (cp_parser_parse_definitely (parser))
    return id;
  /* Otherwise, look for an ordinary identifier.  */
  return cp_parser_identifier (parser);
}

/* Overloading [gram.over] */

/* Parse an operator-function-id.
operator-function-id: operator operator Returns an IDENTIFIER_NODE for the operator which is a human-readable spelling of the identifier, e.g., `operator +'. */ static tree cp_parser_operator_function_id (cp_parser* parser) { /* Look for the `operator' keyword. */ if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR)) return error_mark_node; /* And then the name of the operator itself. */ return cp_parser_operator (parser); } /* Return an identifier node for a user-defined literal operator. The suffix identifier is chained to the operator name identifier. */ static tree cp_literal_operator_id (const char* name) { tree identifier; char *buffer = XNEWVEC (char, strlen (UDLIT_OP_ANSI_PREFIX) + strlen (name) + 10); sprintf (buffer, UDLIT_OP_ANSI_FORMAT, name); identifier = get_identifier (buffer); /*IDENTIFIER_UDLIT_OPNAME_P (identifier) = 1; If we get a flag someday. */ return identifier; } /* Parse an operator. operator: new delete new[] delete[] + - * / % ^ & | ~ ! = < > += -= *= /= %= ^= &= |= << >> >>= <<= == != <= >= && || ++ -- , ->* -> () [] GNU Extensions: operator: <? >? <?= >?= Returns an IDENTIFIER_NODE for the operator which is a human-readable spelling of the identifier, e.g., `operator +'. */ static tree cp_parser_operator (cp_parser* parser) { tree id = NULL_TREE; cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Figure out which operator we have. */ switch (token->type) { case CPP_KEYWORD: { enum tree_code op; /* The keyword should be either `new' or `delete'. */ if (token->keyword == RID_NEW) op = NEW_EXPR; else if (token->keyword == RID_DELETE) op = DELETE_EXPR; else break; /* Consume the `new' or `delete' token. */ cp_lexer_consume_token (parser->lexer); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `[' token then this is the array variant of the operator. */ if (token->type == CPP_OPEN_SQUARE) { /* Consume the `[' token. 
*/ cp_lexer_consume_token (parser->lexer); /* Look for the `]' token. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); id = ansi_opname (op == NEW_EXPR ? VEC_NEW_EXPR : VEC_DELETE_EXPR); } /* Otherwise, we have the non-array variant. */ else id = ansi_opname (op); return id; } case CPP_PLUS: id = ansi_opname (PLUS_EXPR); break; case CPP_MINUS: id = ansi_opname (MINUS_EXPR); break; case CPP_MULT: id = ansi_opname (MULT_EXPR); break; case CPP_DIV: id = ansi_opname (TRUNC_DIV_EXPR); break; case CPP_MOD: id = ansi_opname (TRUNC_MOD_EXPR); break; case CPP_XOR: id = ansi_opname (BIT_XOR_EXPR); break; case CPP_AND: id = ansi_opname (BIT_AND_EXPR); break; case CPP_OR: id = ansi_opname (BIT_IOR_EXPR); break; case CPP_COMPL: id = ansi_opname (BIT_NOT_EXPR); break; case CPP_NOT: id = ansi_opname (TRUTH_NOT_EXPR); break; case CPP_EQ: id = ansi_assopname (NOP_EXPR); break; case CPP_LESS: id = ansi_opname (LT_EXPR); break; case CPP_GREATER: id = ansi_opname (GT_EXPR); break; case CPP_PLUS_EQ: id = ansi_assopname (PLUS_EXPR); break; case CPP_MINUS_EQ: id = ansi_assopname (MINUS_EXPR); break; case CPP_MULT_EQ: id = ansi_assopname (MULT_EXPR); break; case CPP_DIV_EQ: id = ansi_assopname (TRUNC_DIV_EXPR); break; case CPP_MOD_EQ: id = ansi_assopname (TRUNC_MOD_EXPR); break; case CPP_XOR_EQ: id = ansi_assopname (BIT_XOR_EXPR); break; case CPP_AND_EQ: id = ansi_assopname (BIT_AND_EXPR); break; case CPP_OR_EQ: id = ansi_assopname (BIT_IOR_EXPR); break; case CPP_LSHIFT: id = ansi_opname (LSHIFT_EXPR); break; case CPP_RSHIFT: id = ansi_opname (RSHIFT_EXPR); break; case CPP_LSHIFT_EQ: id = ansi_assopname (LSHIFT_EXPR); break; case CPP_RSHIFT_EQ: id = ansi_assopname (RSHIFT_EXPR); break; case CPP_EQ_EQ: id = ansi_opname (EQ_EXPR); break; case CPP_NOT_EQ: id = ansi_opname (NE_EXPR); break; case CPP_LESS_EQ: id = ansi_opname (LE_EXPR); break; case CPP_GREATER_EQ: id = ansi_opname (GE_EXPR); break; case CPP_AND_AND: id = ansi_opname (TRUTH_ANDIF_EXPR); break; case CPP_OR_OR: 
id = ansi_opname (TRUTH_ORIF_EXPR); break; case CPP_PLUS_PLUS: id = ansi_opname (POSTINCREMENT_EXPR); break; case CPP_MINUS_MINUS: id = ansi_opname (PREDECREMENT_EXPR); break; case CPP_COMMA: id = ansi_opname (COMPOUND_EXPR); break; case CPP_DEREF_STAR: id = ansi_opname (MEMBER_REF); break; case CPP_DEREF: id = ansi_opname (COMPONENT_REF); break; case CPP_OPEN_PAREN: /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Look for the matching `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return ansi_opname (CALL_EXPR); case CPP_OPEN_SQUARE: /* Consume the `['. */ cp_lexer_consume_token (parser->lexer); /* Look for the matching `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); return ansi_opname (ARRAY_REF); case CPP_STRING: if (cxx_dialect == cxx98) maybe_warn_cpp0x (CPP0X_USER_DEFINED_LITERALS); if (TREE_STRING_LENGTH (token->u.value) > 2) { error ("expected empty string after %<operator%> keyword"); return error_mark_node; } /* Consume the string. */ cp_lexer_consume_token (parser->lexer); /* Look for the suffix identifier. */ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME) { id = cp_parser_identifier (parser); if (id != error_mark_node) { const char *name = IDENTIFIER_POINTER (id); return cp_literal_operator_id (name); } } else { error ("expected suffix identifier"); return error_mark_node; } case CPP_STRING_USERDEF: error ("missing space between %<\"\"%> and suffix identifier"); return error_mark_node; default: /* Anything else is an error. */ break; } /* If we have selected an identifier, we need to consume the operator token. */ if (id) cp_lexer_consume_token (parser->lexer); /* Otherwise, no valid operator name was present. */ else { cp_parser_error (parser, "expected operator"); id = error_mark_node; } return id; } /* Parse a template-declaration. 
   template-declaration:
     export [opt] template < template-parameter-list > declaration

   If MEMBER_P is TRUE, this template-declaration occurs within a
   class-specifier.

   The grammar rule given by the standard isn't correct.  What
   is really meant is:

   template-declaration:
     export [opt] template-parameter-list-seq
       decl-specifier-seq [opt] init-declarator [opt] ;
     export [opt] template-parameter-list-seq
       function-definition

   template-parameter-list-seq:
     template-parameter-list-seq [opt]
     template < template-parameter-list >  */

static void
cp_parser_template_declaration (cp_parser* parser, bool member_p)
{
  /* Check for `export'.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT))
    {
      /* Consume the `export' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Warn that we do not support `export'.  */
      warning (0, "keyword %<export%> not implemented, and will be ignored");
    }

  cp_parser_template_declaration_after_export (parser, member_p);
}

/* Parse a template-parameter-list.

   template-parameter-list:
     template-parameter
     template-parameter-list , template-parameter

   Returns a TREE_LIST.  Each node represents a template parameter.
   The nodes are connected via their TREE_CHAINs.  */

static tree
cp_parser_template_parameter_list (cp_parser* parser)
{
  tree parameter_list = NULL_TREE;

  begin_template_parm_list ();

  /* The loop below parses the template parms.  We first need to know
     the total number of template parms to be able to compute proper
     canonical types of each dependent type.  So after the loop, when
     we know the total number of template parms,
     end_template_parm_list computes the proper canonical types and
     fixes up the dependent types accordingly.  */
  while (true)
    {
      tree parameter;
      bool is_non_type;
      bool is_parameter_pack;
      location_t parm_loc;

      /* Parse the template-parameter.  */
      parm_loc = cp_lexer_peek_token (parser->lexer)->location;
      parameter = cp_parser_template_parameter (parser,
                                                &is_non_type,
                                                &is_parameter_pack);
      /* Add it to the list.  On error, chain a placeholder node so
	 later parameters keep their positions.  */
      if (parameter != error_mark_node)
	parameter_list = process_template_parm (parameter_list,
						parm_loc,
						parameter,
						is_non_type,
						is_parameter_pack);
      else
	{
	  tree err_parm = build_tree_list (parameter, parameter);
	  parameter_list = chainon (parameter_list, err_parm);
	}

      /* If the next token is not a `,', we're done.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Otherwise, consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return end_template_parm_list (parameter_list);
}

/* Parse a template-parameter.

   template-parameter:
     type-parameter
     parameter-declaration

   If all goes well, returns a TREE_LIST.  The TREE_VALUE represents
   the parameter.  The TREE_PURPOSE is the default value, if any.
   Returns ERROR_MARK_NODE on failure.

   *IS_NON_TYPE is set to true iff this parameter is a non-type
   parameter.  *IS_PARAMETER_PACK is set to true iff this parameter is
   a parameter pack. */

static tree
cp_parser_template_parameter (cp_parser* parser, bool *is_non_type,
                              bool *is_parameter_pack)
{
  cp_token *token;
  cp_parameter_declarator *parameter_declarator;
  cp_declarator *id_declarator;
  tree parm;

  /* Assume it is a type parameter or a template parameter.  */
  *is_non_type = false;
  /* Assume it not a parameter pack. */
  *is_parameter_pack = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it is `class' or `template', we have a type-parameter.  */
  if (token->keyword == RID_TEMPLATE)
    return cp_parser_type_parameter (parser, is_parameter_pack);
  /* If it is `class' or `typename' we do not know yet whether it is a
     type parameter or a non-type parameter.  Consider:

       template <typename T, typename T::X X> ...

     or:

       template <class C, class D*> ...

     Here, the first parameter is a type parameter, and the second is
     a non-type parameter.  We can tell by looking at the token after
     the identifier -- if it is a `,', `=', or `>' then we have a type
     parameter.  */
  if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS)
    {
      /* Peek at the token after `class' or `typename'.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If it's an ellipsis, we have a template type parameter
         pack. */
      if (token->type == CPP_ELLIPSIS)
        return cp_parser_type_parameter (parser, is_parameter_pack);
      /* If it's an identifier, skip it.  */
      if (token->type == CPP_NAME)
	token = cp_lexer_peek_nth_token (parser->lexer, 3);
      /* Now, see if the token looks like the end of a template
	 parameter.  */
      if (token->type == CPP_COMMA
	  || token->type == CPP_EQ
	  || token->type == CPP_GREATER)
	return cp_parser_type_parameter (parser, is_parameter_pack);
    }

  /* Otherwise, it is a non-type parameter.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  *is_non_type = true;
  parameter_declarator
    = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true,
				       /*parenthesized_p=*/NULL);

  /* If the parameter declaration is marked as a parameter pack, set
     *IS_PARAMETER_PACK to notify the caller. Also, unmark the
     declarator's PACK_EXPANSION_P, otherwise we'll get errors from
     grokdeclarator. */
  if (parameter_declarator
      && parameter_declarator->declarator
      && parameter_declarator->declarator->parameter_pack_p)
    {
      *is_parameter_pack = true;
      parameter_declarator->declarator->parameter_pack_p = false;
    }

  /* If the next token is an ellipsis, and we don't already have it
     marked as a parameter pack, then we have a parameter pack (that
     has no declarator).  */
  if (!*is_parameter_pack
      && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
      && declarator_can_be_parameter_pack (parameter_declarator->declarator))
    {
      /* Consume the `...'. */
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();

      *is_parameter_pack = true;
    }
  /* We might end up with a pack expansion as the type of the non-type
     template parameter, in which case this is a non-type template
     parameter pack.  */
  else if (parameter_declarator
	   && parameter_declarator->decl_specifiers.type
	   && PACK_EXPANSION_P (parameter_declarator->decl_specifiers.type))
    {
      *is_parameter_pack = true;
      parameter_declarator->decl_specifiers.type =
	PACK_EXPANSION_PATTERN (parameter_declarator->decl_specifiers.type);
    }

  if (*is_parameter_pack
      && cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Parameter packs cannot have default arguments.  However, a
	 user may try to do so, so we'll parse them and give an
	 appropriate diagnostic here.  */

      cp_token *start_token = cp_lexer_peek_token (parser->lexer);

      /* Find the name of the parameter pack.  */
      id_declarator = parameter_declarator->declarator;
      while (id_declarator && id_declarator->kind != cdk_id)
	id_declarator = id_declarator->declarator;

      if (id_declarator && id_declarator->kind == cdk_id)
	error_at (start_token->location,
		  "template parameter pack %qD cannot have a default argument",
		  id_declarator->u.id.unqualified_name);
      else
	error_at (start_token->location,
		  "template parameter pack cannot have a default argument");

      /* Parse the default argument, but throw away the result.  */
      cp_parser_default_argument (parser, /*template_parm_p=*/true);
    }

  parm = grokdeclarator (parameter_declarator->declarator,
			 &parameter_declarator->decl_specifiers,
			 TPARM, /*initialized=*/0,
			 /*attrlist=*/NULL);
  if (parm == error_mark_node)
    return error_mark_node;

  return build_tree_list (parameter_declarator->default_argument, parm);
}

/* Parse a type-parameter.
   type-parameter:
     class identifier [opt]
     class identifier [opt] = type-id
     typename identifier [opt]
     typename identifier [opt] = type-id
     template < template-parameter-list > class identifier [opt]
     template < template-parameter-list > class identifier [opt]
       = id-expression

   GNU Extension (variadic templates):

   type-parameter:
     class ... identifier [opt]
     typename ... identifier [opt]

   Returns a TREE_LIST.  The TREE_VALUE is itself a TREE_LIST.  The
   TREE_PURPOSE is the default-argument, if any.  The TREE_VALUE is
   the declaration of the parameter.

   Sets *IS_PARAMETER_PACK if this is a template parameter pack. */

static tree
cp_parser_type_parameter (cp_parser* parser, bool *is_parameter_pack)
{
  cp_token *token;
  tree parameter;

  /* Look for a keyword to tell us what kind of parameter this is.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_TYPENAME_TEMPLATE);
  if (!token)
    return error_mark_node;

  switch (token->keyword)
    {
    case RID_CLASS:
    case RID_TYPENAME:
      {
	tree identifier;
	tree default_argument;

	/* If the next token is an ellipsis, we have a template
	   argument pack. */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token. */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();

	    *is_parameter_pack = true;
	  }

	/* If the next token is an identifier, then it names the
	   parameter.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	  identifier = cp_parser_identifier (parser);
	else
	  identifier = NULL_TREE;

	/* Create the parameter.  */
	parameter = finish_template_type_parm (class_type_node, identifier);

	/* If the next token is an `=', we have a default argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    /* Consume the `=' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the default-argument.  Access checks are deferred
	       off while doing so.  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument = cp_parser_type_id (parser);

	    /* Template parameter packs cannot have default
	       arguments. */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot have a "
			    "default argument", identifier);
		else
		  error_at (token->location,
			    "template parameter packs cannot have "
			    "default arguments");
		default_argument = NULL_TREE;
	      }
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    case RID_TEMPLATE:
      {
	tree identifier;
	tree default_argument;

	/* Look for the `<'.  */
	cp_parser_require (parser, CPP_LESS, RT_LESS);
	/* Parse the template-parameter-list.  */
	cp_parser_template_parameter_list (parser);
	/* Look for the `>'.  */
	cp_parser_require (parser, CPP_GREATER, RT_GREATER);
	/* Look for the `class' keyword.  */
	cp_parser_require_keyword (parser, RID_CLASS, RT_CLASS);

	/* If the next token is an ellipsis, we have a template
	   argument pack. */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token. */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();

	    *is_parameter_pack = true;
	  }

	/* If the next token is an `=', then there is a
	   default-argument.  If the next token is a `>', we are at
	   the end of the parameter-list.  If the next token is a `,',
	   then we are at the end of this parameter.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  {
	    identifier = cp_parser_identifier (parser);
	    /* Treat invalid names as if the parameter were nameless.  */
	    if (identifier == error_mark_node)
	      identifier = NULL_TREE;
	  }
	else
	  identifier = NULL_TREE;

	/* Create the template parameter.  */
	parameter = finish_template_template_parm (class_type_node,
						   identifier);

	/* If the next token is an `=', then there is a
	   default-argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    bool is_template;

	    /* Consume the `='.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the id-expression.  */
	    push_deferring_access_checks (dk_no_deferred);
	    /* save token before parsing the id-expression, for error
	       reporting */
	    token = cp_lexer_peek_token (parser->lexer);
	    default_argument
	      = cp_parser_id_expression (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*template_p=*/&is_template,
					 /*declarator_p=*/false,
					 /*optional_p=*/false);
	    if (TREE_CODE (default_argument) == TYPE_DECL)
	      /* If the id-expression was a template-id that refers to
		 a template-class, we already have the declaration here,
		 so no further lookup is needed.  */
	      ;
	    else
	      /* Look up the name.  */
	      default_argument
		= cp_parser_lookup_name (parser, default_argument,
					 none_type,
					 /*is_template=*/is_template,
					 /*is_namespace=*/false,
					 /*check_dependency=*/true,
					 /*ambiguous_decls=*/NULL,
					 token->location);

	    /* See if the default argument is valid.  */
	    default_argument
	      = check_template_template_default_arg (default_argument);

	    /* Template parameter packs cannot have default
	       arguments. */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot "
			    "have a default argument",
			    identifier);
		else
		  error_at (token->location, "template parameter packs cannot "
			    "have default arguments");
		default_argument = NULL_TREE;
	      }
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    default:
      gcc_unreachable ();
      break;
    }

  return parameter;
}

/* Parse a template-id.

   template-id:
     template-name < template-argument-list [opt] >

   If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the
   `template' keyword.   In this case, a TEMPLATE_ID_EXPR will be
   returned.  Otherwise, if the template-name names a function, or set
   of functions, returns a TEMPLATE_ID_EXPR.  If the template-name
   names a class, returns a TYPE_DECL for the specialization.
If CHECK_DEPENDENCY_P is FALSE, names are looked up in
   uninstantiated templates.  */

static tree
cp_parser_template_id (cp_parser *parser,
		       bool template_keyword_p,
		       bool check_dependency_p,
		       bool is_declaration)
{
  int i;
  tree templ;
  tree arguments;
  tree template_id;
  cp_token_position start_of_id = 0;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *access_check;
  cp_token *next_token = NULL, *next_token_2 = NULL;
  bool is_identifier;

  /* If the next token corresponds to a template-id, there is no need
     to reparse it.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type == CPP_TEMPLATE_ID)
    {
      struct tree_check *check_value;

      /* Get the stored value.  */
      check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
      /* Perform any access checks that were deferred.  */
      access_check = check_value->checks;
      if (access_check)
	{
	  FOR_EACH_VEC_ELT (deferred_access_check, access_check, i, chk)
	    perform_or_defer_access_check (chk->binfo,
					   chk->decl,
					   chk->diag_decl);
	}
      /* Return the stored value.  */
      return check_value->value;
    }

  /* Avoid performing name lookup if there is no possibility of
     finding a template-id.  */
  if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR)
      || (next_token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2)))
    {
      cp_parser_error (parser, "expected template-id");
      return error_mark_node;
    }

  /* Remember where the template-id starts.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    start_of_id = cp_lexer_token_position (parser->lexer, false);

  /* Defer access checks here so that, if we commit to a
     CPP_TEMPLATE_ID token below, the checks can be saved alongside it
     and replayed on re-parse.  */
  push_deferring_access_checks (dk_deferred);

  /* Parse the template-name.  */
  is_identifier = false;
  templ = cp_parser_template_name (parser, template_keyword_p,
				   check_dependency_p,
				   is_declaration,
				   &is_identifier);
  if (templ == error_mark_node || is_identifier)
    {
      pop_deferring_access_checks ();
      return templ;
    }

  /* If we find the sequence `[:' after a template-name, it's probably
     a digraph-typo for `< ::'.  Substitute the tokens and check if we can
     parse correctly the argument list.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2);
  if (next_token->type == CPP_OPEN_SQUARE
      && next_token->flags & DIGRAPH
      && next_token_2->type == CPP_COLON
      && !(next_token_2->flags & PREV_WHITE))
    {
      cp_parser_parse_tentatively (parser);
      /* Change `:' into `::'.  */
      next_token_2->type = CPP_SCOPE;
      /* Consume the first token (CPP_OPEN_SQUARE - which we pretend it is
	 CPP_LESS.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
      if (!cp_parser_parse_definitely (parser))
	{
	  /* If we couldn't parse an argument list, then we revert our changes
	     and return simply an error. Maybe this is not a template-id
	     after all.  */
	  next_token_2->type = CPP_COLON;
	  cp_parser_error (parser, "expected %<<%>");
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Otherwise, emit an error about the invalid digraph, but continue
	 parsing because we got our argument list.  */
      if (permerror (next_token->location,
		     "%<<::%> cannot begin a template-argument list"))
	{
	  static bool hint = false;
	  inform (next_token->location,
		  "%<<:%> is an alternate spelling for %<[%>."
		  " Insert whitespace between %<<%> and %<::%>");
	  if (!hint && !flag_permissive)
	    {
	      inform (next_token->location, "(if you use %<-fpermissive%>"
		      " G++ will accept your code)");
	      hint = true;
	    }
	}
    }
  else
    {
      /* Look for the `<' that starts the template-argument-list.  */
      if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
	{
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
    }

  /* Build a representation of the specialization.  */
  if (TREE_CODE (templ) == IDENTIFIER_NODE)
    template_id = build_min_nt (TEMPLATE_ID_EXPR, templ, arguments);
  else if (DECL_TYPE_TEMPLATE_P (templ)
	   || DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
    {
      bool entering_scope;
      /* In "template <typename T> ... A<T>::", A<T> is the abstract A
	 template (rather than some instantiation thereof) only if
	 is not nested within some other construct.  For example, in
	 "template <typename T> void f(T) { A<T>::", A<T> is just an
	 instantiation of A.  */
      entering_scope = (template_parm_scope_p ()
			&& cp_lexer_next_token_is (parser->lexer,
						   CPP_SCOPE));
      template_id
	= finish_template_type (templ, arguments, entering_scope);
    }
  else
    {
      /* If it's not a class-template or a template-template, it should be
	 a function-template.  */
      gcc_assert ((DECL_FUNCTION_TEMPLATE_P (templ)
		   || TREE_CODE (templ) == OVERLOAD
		   || BASELINK_P (templ)));

      template_id = lookup_template_function (templ, arguments);
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the template-id with a CPP_TEMPLATE_ID token.  That way,
     should we re-parse the token stream, we will not have to repeat
     the effort required to do the parse, nor will we issue duplicate
     error messages about problems during instantiation of the
     template.  */
  if (start_of_id)
    {
      cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id);

      /* Reset the contents of the START_OF_ID token.  */
      token->type = CPP_TEMPLATE_ID;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = ggc_alloc_cleared_tree_check ();
      token->u.tree_check_value->value = template_id;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start_of_id);

      /* ??? Can we actually assume that, if template_id ==
	 error_mark_node, we will have issued a diagnostic to the
	 user, as opposed to simply marking the tentative parse as
	 failed?  */
      if (cp_parser_error_occurred (parser) && template_id != error_mark_node)
	error_at (token->location, "parse error in template argument list");
    }

  pop_deferring_access_checks ();
  return template_id;
}

/* Parse a template-name.

   template-name:
     identifier

   The standard should actually say:

   template-name:
     identifier
     operator-function-id

   A defect report has been filed about this issue.

   A conversion-function-id cannot be a template name because they cannot
   be part of a template-id. In fact, looking at this code:

   a.operator K<int>()

   the conversion-function-id is "operator K<int>", and K<int> is a type-id.
   It is impossible to call a templated conversion-function-id with an
   explicit argument list, since the only allowed template parameter is
   the type to which it is converting.

   If TEMPLATE_KEYWORD_P is true, then we have just seen the
   `template' keyword, in a construction like:

     T::template f<3>()

   In that case `f' is taken to be a template-name, even though there
   is no way of knowing for sure.

   Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the
   name refers to a set of overloaded functions, at least one of which
   is a template, or an IDENTIFIER_NODE with the name of the template,
   if TEMPLATE_KEYWORD_P is true.  If CHECK_DEPENDENCY_P is FALSE,
   names are looked up inside uninstantiated templates.  */

static tree
cp_parser_template_name (cp_parser* parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool is_declaration,
			 bool *is_identifier)
{
  tree identifier;
  tree decl;
  tree fns;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is `operator', then we have either an
     operator-function-id or a conversion-function-id.
*/
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR))
    {
      /* We don't know whether we're looking at an
	 operator-function-id or a conversion-function-id.  */
      cp_parser_parse_tentatively (parser);
      /* Try an operator-function-id.  */
      identifier = cp_parser_operator_function_id (parser);
      /* If that didn't work, try a conversion-function-id.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }
  /* Look for the identifier.  */
  else
    identifier = cp_parser_identifier (parser);

  /* If we didn't find an identifier, we don't have a template-id.  */
  if (identifier == error_mark_node)
    return error_mark_node;

  /* If the name immediately followed the `template' keyword, then it
     is a template-name.  However, if the next token is not `<', then
     we do not treat it as a template-name, since it is not being used
     as part of a template-id.  This enables us to handle constructs
     like:

       template <typename T> struct S { S(); };
       template <typename T> S<T>::S();

     correctly.  We would treat `S' as a template -- if it were `S<T>'
     -- but we do not if there is no `<'.  */

  if (processing_template_decl
      && cp_parser_nth_token_starts_template_argument_list_p (parser, 1))
    {
      /* In a declaration, in a dependent context, we pretend that the
	 "template" keyword was present in order to improve error
	 recovery.  For example, given:

	   template <typename T> void f(T::X<int>);

	 we want to treat "X<int>" as a template-id.  */
      if (is_declaration
	  && !template_keyword_p
	  && parser->scope && TYPE_P (parser->scope)
	  && check_dependency_p
	  && dependent_scope_p (parser->scope)
	  /* Do not do this for dtors (or ctors), since they never
	     need the template keyword before their name.  */
	  && !constructor_name_p (identifier, parser->scope))
	{
	  cp_token_position start = 0;

	  /* Explain what went wrong.  */
	  error_at (token->location, "non-template %qD used as template",
		    identifier);
	  inform (token->location,
		  "use %<%T::template %D%> to indicate that it is a template",
		  parser->scope, identifier);
	  /* If parsing tentatively, find the location of the "<" token.  */
	  if (cp_parser_simulate_error (parser))
	    start = cp_lexer_token_position (parser->lexer, true);
	  /* Parse the template arguments so that we can issue error
	     messages about them.  */
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_enclosed_template_argument_list (parser);
	  /* Skip tokens until we find a good place from which to
	     continue parsing.  */
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/false);
	  /* If parsing tentatively, permanently remove the
	     template argument list.  That will prevent duplicate
	     error messages from being issued about the missing
	     "template" keyword.  */
	  if (start)
	    cp_lexer_purge_tokens_after (parser->lexer, start);
	  if (is_identifier)
	    *is_identifier = true;
	  return identifier;
	}

      /* If the "template" keyword is present, then there is generally
	 no point in doing name-lookup, so we just return IDENTIFIER.
	 But, if the qualifying scope is non-dependent then we can
	 (and must) do name-lookup normally.  */
      if (template_keyword_p
	  && (!parser->scope
	      || (TYPE_P (parser->scope)
		  && dependent_type_p (parser->scope))))
	return identifier;
    }

  /* Look up the name.  */
  decl = cp_parser_lookup_name (parser, identifier,
				none_type,
				/*is_template=*/true,
				/*is_namespace=*/false,
				check_dependency_p,
				/*ambiguous_decls=*/NULL,
				token->location);

  /* If DECL is a template, then the name was a template-name.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    ;
  else
    {
      tree fn = NULL_TREE;

      /* The standard does not explicitly indicate whether a name that
	 names a set of overloaded declarations, some of which are
	 templates, is a template-name.  However, such a name should
	 be a template-name; otherwise, there is no way to form a
	 template-id for the overloaded templates.  */
      fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl;
      if (TREE_CODE (fns) == OVERLOAD)
	for (fn = fns; fn; fn = OVL_NEXT (fn))
	  if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL)
	    break;

      if (!fn)
	{
	  /* The name does not name a template.  */
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }

  /* If DECL is dependent, and refers to a function, then just return
     its name; we will look it up again during template instantiation.  */
  if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl))
    {
      tree scope = ovl_scope (decl);
      if (TYPE_P (scope) && dependent_type_p (scope))
	return identifier;
    }

  return decl;
}

/* Parse a template-argument-list.

   template-argument-list:
     template-argument ... [opt]
     template-argument-list , template-argument ... [opt]

   Returns a TREE_VEC containing the arguments.  */

static tree
cp_parser_template_argument_list (cp_parser* parser)
{
  tree fixed_args[10];
  unsigned n_args = 0;
  unsigned alloced = 10;
  tree *arg_ary = fixed_args;
  tree vec;
  bool saved_in_template_argument_list_p;
  bool saved_ice_p;
  bool saved_non_ice_p;

  saved_in_template_argument_list_p = parser->in_template_argument_list_p;
  parser->in_template_argument_list_p = true;
  /* Even if the template-id appears in an integral
     constant-expression, the contents of the argument list do
     not.  */
  saved_ice_p = parser->integral_constant_expression_p;
  parser->integral_constant_expression_p = false;
  saved_non_ice_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p = false;
  /* Parse the arguments.  */
  do
    {
      tree argument;

      if (n_args)
	/* Consume the comma.  */
	cp_lexer_consume_token (parser->lexer);

      /* Parse the template-argument.  */
      argument = cp_parser_template_argument (parser);

      /* If the next token is an ellipsis, we're expanding a template
	 argument pack.
*/
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
        {
          if (argument == error_mark_node)
            {
              cp_token *token = cp_lexer_peek_token (parser->lexer);
              error_at (token->location,
			"expected parameter pack before %<...%>");
            }
          /* Consume the `...' token.  */
          cp_lexer_consume_token (parser->lexer);

          /* Make the argument into a TYPE_PACK_EXPANSION or
             EXPR_PACK_EXPANSION.  */
          argument = make_pack_expansion (argument);
        }

      if (n_args == alloced)
	{
	  alloced *= 2;

	  if (arg_ary == fixed_args)
	    {
	      arg_ary = XNEWVEC (tree, alloced);
	      memcpy (arg_ary, fixed_args, sizeof (tree) * n_args);
	    }
	  else
	    arg_ary = XRESIZEVEC (tree, arg_ary, alloced);
	}
      arg_ary[n_args++] = argument;
    }
  while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA));

  vec = make_tree_vec (n_args);

  while (n_args--)
    TREE_VEC_ELT (vec, n_args) = arg_ary[n_args];

  if (arg_ary != fixed_args)
    free (arg_ary);
  /* Restore the saved parser state that was overridden above.  */
  parser->non_integral_constant_expression_p = saved_non_ice_p;
  parser->integral_constant_expression_p = saved_ice_p;
  parser->in_template_argument_list_p = saved_in_template_argument_list_p;
#ifdef ENABLE_CHECKING
  SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));
#endif
  return vec;
}

/* Parse a template-argument.

   template-argument:
     assignment-expression
     type-id
     id-expression

   The representation is that of an assignment-expression, type-id, or
   id-expression -- except that the qualified id-expression is
   evaluated, so that the value returned is either a DECL or an
   OVERLOAD.

   Although the standard says "assignment-expression", it forbids
   throw-expressions or assignments in the template argument.
   Therefore, we use "conditional-expression" instead.  */

static tree
cp_parser_template_argument (cp_parser* parser)
{
  tree argument;
  bool template_p;
  bool address_p;
  bool maybe_type_id = false;
  cp_token *token = NULL, *argument_start_token = NULL;
  cp_id_kind idk;

  /* There's really no way to know what we're looking at, so we just
     try each alternative in order.

       [temp.arg]

       In a template-argument, an ambiguity between a type-id and an
       expression is resolved to a type-id, regardless of the form of
       the corresponding template-parameter.

     Therefore, we try a type-id first.  */
  cp_parser_parse_tentatively (parser);
  argument = cp_parser_template_type_arg (parser);
  /* If there was no error parsing the type-id but the next token is a
     '>>', our behavior depends on which dialect of C++ we're
     parsing. In C++98, we probably found a typo for '> >'. But there
     are type-id which are also valid expressions. For instance:

     struct X { int operator >> (int); };
     template <int V> struct Foo {};
     Foo<X () >> 5> r;

     Here 'X()' is a valid type-id of a function type, but the user just
     wanted to write the expression "X() >> 5". Thus, we remember that we
     found a valid type-id, but we still try to parse the argument as an
     expression to see what happens.

     In C++0x, the '>>' will be considered two separate '>'
     tokens.  */
  if (!cp_parser_error_occurred (parser)
      && cxx_dialect == cxx98
      && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      maybe_type_id = true;
      cp_parser_abort_tentative_parse (parser);
    }
  else
    {
      /* If the next token isn't a `,' or a `>', then this argument wasn't
	 really finished. This means that the argument is not a valid
	 type-id.  */
      if (!cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_error (parser, "expected template-argument");
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return argument;
    }
  /* We're still not sure what the argument will be.  */
  cp_parser_parse_tentatively (parser);
  /* Try a template.  */
  argument_start_token = cp_lexer_peek_token (parser->lexer);
  argument = cp_parser_id_expression (parser,
				      /*template_keyword_p=*/false,
				      /*check_dependency_p=*/true,
				      &template_p,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
  /* If the next token isn't a `,' or a `>', then this argument wasn't
     really finished.  */
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (!cp_parser_error_occurred (parser))
    {
      /* Figure out what is being referred to.  If the id-expression
	 was for a class template specialization, then we will have a
	 TYPE_DECL at this point.  There is no need to do name lookup
	 at this point in that case.  */
      if (TREE_CODE (argument) != TYPE_DECL)
	argument = cp_parser_lookup_name (parser, argument,
					  none_type,
					  /*is_template=*/template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  /*ambiguous_decls=*/NULL,
					  argument_start_token->location);
      if (TREE_CODE (argument) != TEMPLATE_DECL
	  && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
	cp_parser_error (parser, "expected template-name");
    }
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* It must be a non-type argument.  The permitted cases are given in
     [temp.arg.nontype]:

     -- an integral constant-expression of integral or enumeration
	type; or

     -- the name of a non-type template-parameter; or

     -- the name of an object or function with external linkage...

     -- the address of an object or function with external linkage...

     -- a pointer to member...  */
  /* Look for a non-type template parameter.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       /*address_p=*/false,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_simulate_error (parser);
      if (cp_parser_parse_definitely (parser))
	return argument;
    }

  /* If the next token is "&", the argument must be the address of an
     object or function with external linkage.  */
  address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND);
  if (address_p)
    cp_lexer_consume_token (parser->lexer);
  /* See if we might have an id-expression.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME
      || token->keyword == RID_OPERATOR
      || token->type == CPP_SCOPE
      || token->type == CPP_TEMPLATE_ID
      || token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       address_p,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (cp_parser_error_occurred (parser)
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_abort_tentative_parse (parser);
      else
	{
	  tree probe;

	  if (TREE_CODE (argument) == INDIRECT_REF)
	    {
	      gcc_assert (REFERENCE_REF_P (argument));
	      argument = TREE_OPERAND (argument, 0);
	    }

	  /* If we're in a template, we represent a qualified-id referring
	     to a static data member as a SCOPE_REF even if the scope isn't
	     dependent so that we can check access control later.  */
	  probe = argument;
	  if (TREE_CODE (probe) == SCOPE_REF)
	    probe = TREE_OPERAND (probe, 1);
	  if (TREE_CODE (probe) == VAR_DECL)
	    {
	      /* A variable without external linkage might still be a
		 valid constant-expression, so no error is issued here
		 if the external-linkage check fails.  */
	      if (!address_p && !DECL_EXTERNAL_LINKAGE_P (probe))
		cp_parser_simulate_error (parser);
	    }
	  else if (is_overloaded_fn (argument))
	    /* All overloaded functions are allowed; if the external
	       linkage test does not pass, an error will be issued
	       later.  */
	    ;
	  else if (address_p
		   && (TREE_CODE (argument) == OFFSET_REF
		       || TREE_CODE (argument) == SCOPE_REF))
	    /* A pointer-to-member.  */
	    ;
	  else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX)
	    ;
	  else
	    cp_parser_simulate_error (parser);

	  if (cp_parser_parse_definitely (parser))
	    {
	      if (address_p)
		argument = build_x_unary_op (ADDR_EXPR, argument,
					     tf_warning_or_error);
	      return argument;
	    }
	}
    }
  /* If the argument started with "&", there are no other valid
     alternatives at this point.  */
  if (address_p)
    {
      cp_parser_error (parser, "invalid non-type template argument");
      return error_mark_node;
    }

  /* If the argument wasn't successfully parsed as a type-id followed
     by '>>', the argument can only be a constant expression now.
     Otherwise, we try parsing the constant-expression tentatively,
     because the argument could really be a type-id.  */
  if (maybe_type_id)
    cp_parser_parse_tentatively (parser);
  argument = cp_parser_constant_expression (parser,
					    /*allow_non_constant_p=*/false,
					    /*non_constant_p=*/NULL);
  argument = fold_non_dependent_expr (argument);
  if (!maybe_type_id)
    return argument;
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* We did our best to parse the argument as a non type-id, but that
     was the only alternative that matched (albeit with a '>' after
     it). We can assume it's just a typo from the user, and a
     diagnostic will then be issued.  */
  return cp_parser_template_type_arg (parser);
}

/* Parse an explicit-instantiation.

   explicit-instantiation:
     template declaration

   Although the standard says `declaration', what it really means is:

   explicit-instantiation:
     template decl-specifier-seq [opt] declarator [opt] ;

   Things like `template int S<int>::i = 5, int S<double>::j;' are not
   supposed to be allowed.  A defect report has been filed about this
   issue.

   GNU Extension:

   explicit-instantiation:
     storage-class-specifier template
       decl-specifier-seq [opt] declarator [opt] ;
     function-specifier template
       decl-specifier-seq [opt] declarator [opt] ;  */

static void
cp_parser_explicit_instantiation (cp_parser* parser)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  tree extension_specifier = NULL_TREE;

  timevar_push (TV_TEMPLATE_INST);

  /* Look for an (optional) storage-class-specifier or
     function-specifier.
*/
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      extension_specifier
	= cp_parser_storage_class_specifier_opt (parser);
      if (!extension_specifier)
	extension_specifier
	  = cp_parser_function_specifier_opt (parser,
					      /*decl_specs=*/NULL);
    }

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Let the front end know that we are processing an explicit
     instantiation.  */
  begin_explicit_instantiation ();
  /* [temp.explicit] says that we are supposed to ignore access
     control while processing explicit instantiation directives.  */
  push_deferring_access_checks (dk_no_check);
  /* Parse a decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* If there was exactly one decl-specifier, and it declared a class,
     and there's no declarator, then we have an explicit type
     instantiation.  */
  if (declares_class_or_enum && cp_parser_declares_only_class_p (parser))
    {
      tree type;

      type = check_tag_decl (&decl_specifiers);
      /* Turn access control back on for names used during
	 template instantiation.  */
      pop_deferring_access_checks ();
      if (type)
	do_type_instantiation (type, extension_specifier,
			       /*complain=*/tf_error);
    }
  else
    {
      cp_declarator *declarator;
      tree decl;

      /* Parse the declarator.  */
      declarator
	= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
				/*ctor_dtor_or_conv_p=*/NULL,
				/*parenthesized_p=*/NULL,
				/*member_p=*/false);
      if (declares_class_or_enum & 2)
	cp_parser_check_for_definition_in_return_type (declarator,
						       decl_specifiers.type,
						       decl_specifiers.type_location);
      if (declarator != cp_error_declarator)
	{
	  if (decl_specifiers.specs[(int)ds_inline])
	    permerror (input_location, "explicit instantiation shall not use"
		       " %<inline%> specifier");
	  if (decl_specifiers.specs[(int)ds_constexpr])
	    permerror (input_location, "explicit instantiation shall not use"
		       " %<constexpr%> specifier");

	  decl = grokdeclarator (declarator, &decl_specifiers,
				 NORMAL, 0, &decl_specifiers.attributes);
	  /* Turn access control back on for names used during
	     template instantiation.  */
	  pop_deferring_access_checks ();
	  /* Do the explicit instantiation.  */
	  do_decl_instantiation (decl, extension_specifier);
	}
      else
	{
	  pop_deferring_access_checks ();
	  /* Skip the body of the explicit instantiation.  */
	  cp_parser_skip_to_end_of_statement (parser);
	}
    }
  /* We're done with the instantiation.  */
  end_explicit_instantiation ();

  cp_parser_consume_semicolon_at_end_of_statement (parser);

  timevar_pop (TV_TEMPLATE_INST);
}

/* Parse an explicit-specialization.

   explicit-specialization:
     template < > declaration

   Although the standard says `declaration', what it really means is:

   explicit-specialization:
     template <> decl-specifier [opt] init-declarator [opt] ;
     template <> function-definition
     template <> explicit-specialization
     template <> template-declaration  */

static void
cp_parser_explicit_specialization (cp_parser* parser)
{
  bool need_lang_pop;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Look for the `<'.  */
  cp_parser_require (parser, CPP_LESS, RT_LESS);
  /* Look for the `>'.  */
  cp_parser_require (parser, CPP_GREATER, RT_GREATER);
  /* We have processed another parameter list.  */
  ++parser->num_template_parameter_lists;
  /* [temp]

     A template ... explicit specialization ... shall not have C
     linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template specialization with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;
  /* Let the front end know that we are beginning a specialization.  */
  if (!begin_specialization ())
    {
      end_specialization ();
      return;
    }

  /* If the next keyword is `template', we need to figure out whether
     or not we're looking at a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER)
	cp_parser_template_declaration_after_export (parser,
						     /*member_p=*/false);
      else
	cp_parser_explicit_specialization (parser);
    }
  else
    /* Parse the dependent declaration.  */
    cp_parser_single_declaration (parser,
				  /*checks=*/NULL,
				  /*member_p=*/false,
				  /*explicit_specialization_p=*/true,
				  /*friend_p=*/NULL);
  /* We're done with the specialization.  */
  end_specialization ();
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* We're done with this parameter list.  */
  --parser->num_template_parameter_lists;
}

/* Parse a type-specifier.

   type-specifier:
     simple-type-specifier
     class-specifier
     enum-specifier
     elaborated-type-specifier
     cv-qualifier

   GNU Extension:

   type-specifier:
     __complex__

   Returns a representation of the type-specifier.  For a
   class-specifier, enum-specifier, or elaborated-type-specifier, a
   TREE_TYPE is returned; otherwise, a TYPE_DECL is returned.

   The parser flags FLAGS is used to control type-specifier parsing.

   If IS_DECLARATION is TRUE, then this type-specifier is appearing
   in a decl-specifier-seq.
If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a
   class-specifier, enum-specifier, or elaborated-type-specifier, then
   *DECLARES_CLASS_OR_ENUM is set to a nonzero value.  The value is 1
   if a type is declared; 2 if it is defined.  Otherwise, it is set to
   zero.

   If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a
   cv-qualifier, then IS_CV_QUALIFIER is set to TRUE.  Otherwise, it
   is set to FALSE.  */

static tree
cp_parser_type_specifier (cp_parser* parser,
			  cp_parser_flags flags,
			  cp_decl_specifier_seq *decl_specs,
			  bool is_declaration,
			  int* declares_class_or_enum,
			  bool* is_cv_qualifier)
{
  tree type_spec = NULL_TREE;
  cp_token *token;
  enum rid keyword;
  cp_decl_spec ds = ds_last;

  /* Assume this type-specifier does not declare a new type.  */
  if (declares_class_or_enum)
    *declares_class_or_enum = 0;
  /* And that it does not specify a cv-qualifier.  */
  if (is_cv_qualifier)
    *is_cv_qualifier = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, we can use that to guide the
     production we choose.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_ENUM:
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;

      /* Look for the enum-specifier.  */
      type_spec = cp_parser_enum_specifier (parser);
      /* If that worked, we're done.  */
      if (type_spec)
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token->location,
					  /*type_definition_p=*/true);
	  return type_spec;
	}
      else
	goto elaborated_type_specifier;

      /* Any of these indicate either a class-specifier, or an
	 elaborated-type-specifier.  */
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;

      /* Parse tentatively so that we can back up if we don't find a
	 class-specifier.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the class-specifier.  */
      type_spec = cp_parser_class_specifier (parser);
      invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, type_spec);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token->location,
					  /*type_definition_p=*/true);
	  return type_spec;
	}

      /* Fall through.  */
    elaborated_type_specifier:
      /* We're declaring (not defining) a class or enum.  */
      if (declares_class_or_enum)
	*declares_class_or_enum = 1;

      /* Fall through.  */
    case RID_TYPENAME:
      /* Look for an elaborated-type-specifier.  */
      type_spec
	= (cp_parser_elaborated_type_specifier
	   (parser,
	    decl_specs && decl_specs->specs[(int) ds_friend],
	    is_declaration));
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs,
				      type_spec,
				      token->location,
				      /*type_definition_p=*/false);
      return type_spec;

    case RID_CONST:
      ds = ds_const;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_VOLATILE:
      ds = ds_volatile;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_RESTRICT:
      ds = ds_restrict;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;

    case RID_COMPLEX:
      /* The `__complex__' keyword is a GNU extension.  */
      ds = ds_complex;
      break;

    default:
      break;
    }

  /* Handle simple keywords.  */
  if (ds != ds_last)
    {
      if (decl_specs)
	{
	  ++decl_specs->specs[(int)ds];
	  decl_specs->any_specifiers_p = true;
	}
      return cp_lexer_consume_token (parser->lexer)->u.value;
    }

  /* If we do not already have a type-specifier, assume we are looking
     at a simple-type-specifier.  */
  type_spec = cp_parser_simple_type_specifier (parser,
					       decl_specs,
					       flags);

  /* If we didn't find a type-specifier, and a type-specifier was not
     optional in this context, issue an error message.  */
  if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type specifier");
      return error_mark_node;
    }

  return type_spec;
}

/* Parse a simple-type-specifier.
simple-type-specifier:
     :: [opt] nested-name-specifier [opt] type-name
     :: [opt] nested-name-specifier template template-id
     char
     wchar_t
     bool
     short
     int
     long
     signed
     unsigned
     float
     double
     void

   C++0x Extension:

   simple-type-specifier:
     auto
     decltype ( expression )
     char16_t
     char32_t
     __underlying_type ( type-id )

   GNU Extension:

   simple-type-specifier:
     __int128
     __typeof__ unary-expression
     __typeof__ ( type-id )

   Returns the indicated TYPE_DECL.  If DECL_SPECS is not NULL, it is
   appropriately updated.  */

static tree
cp_parser_simple_type_specifier (cp_parser* parser,
				 cp_decl_specifier_seq *decl_specs,
				 cp_parser_flags flags)
{
  tree type = NULL_TREE;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If we're looking at a keyword, things are easy.  */
  switch (token->keyword)
    {
    case RID_CHAR:
      if (decl_specs)
	decl_specs->explicit_char_p = true;
      type = char_type_node;
      break;
    case RID_CHAR16:
      type = char16_type_node;
      break;
    case RID_CHAR32:
      type = char32_type_node;
      break;
    case RID_WCHAR:
      type = wchar_type_node;
      break;
    case RID_BOOL:
      type = boolean_type_node;
      break;
    case RID_SHORT:
      if (decl_specs)
	++decl_specs->specs[(int) ds_short];
      type = short_integer_type_node;
      break;
    case RID_INT:
      if (decl_specs)
	decl_specs->explicit_int_p = true;
      type = integer_type_node;
      break;
    case RID_INT128:
      /* __int128 is not available on all targets; in that case the
	 node is NULL and we fall out of the switch with no type.  */
      if (!int128_integer_type_node)
	break;
      if (decl_specs)
	decl_specs->explicit_int128_p = true;
      type = int128_integer_type_node;
      break;
    case RID_LONG:
      if (decl_specs)
	++decl_specs->specs[(int) ds_long];
      type = long_integer_type_node;
      break;
    case RID_SIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_signed];
      type = integer_type_node;
      break;
    case RID_UNSIGNED:
      if (decl_specs)
	++decl_specs->specs[(int) ds_unsigned];
      type = unsigned_type_node;
      break;
    case RID_FLOAT:
      type = float_type_node;
      break;
    case RID_DOUBLE:
      type = double_type_node;
      break;
    case RID_VOID:
      type = void_type_node;
      break;

    case RID_AUTO:
      maybe_warn_cpp0x (CPP0X_AUTO);
      type = make_auto ();
      break;

    case RID_DECLTYPE:
      /* Since DR 743, decltype can either be a simple-type-specifier by
	 itself or begin a nested-name-specifier.  Parsing it will replace
	 it with a CPP_DECLTYPE, so just rewind and let the CPP_DECLTYPE
	 handling below decide what to do.  */
      cp_parser_decltype (parser);
      cp_lexer_set_token_position (parser->lexer, token);
      break;

    case RID_TYPEOF:
      /* Consume the `typeof' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the operand to `typeof'.  */
      type = cp_parser_sizeof_operand (parser, RID_TYPEOF);
      /* If it is not already a TYPE, take its type.  */
      if (!TYPE_P (type))
	type = finish_typeof (type);

      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);

      return type;

    case RID_UNDERLYING_TYPE:
      type = cp_parser_trait_expr (parser, RID_UNDERLYING_TYPE);

      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);

      return type;

    case RID_BASES:
    case RID_DIRECT_BASES:
      type = cp_parser_trait_expr (parser, token->keyword);
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
      return type;
    default:
      break;
    }

  /* If token is an already-parsed decltype not followed by ::, it's a
     simple-type-specifier.  */
  if (token->type == CPP_DECLTYPE
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
    {
      type = token->u.value;
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
      cp_lexer_consume_token (parser->lexer);
      return type;
    }

  /* If the type-specifier was for a built-in type, we're done.  */
  if (type)
    {
      /* Record the type.  Note that signed/unsigned/short/long were
	 already recorded via decl_specs->specs above, so they are not
	 recorded again as the base type here.  */
      if (decl_specs
	  && (token->keyword != RID_SIGNED
	      && token->keyword != RID_UNSIGNED
	      && token->keyword != RID_SHORT
	      && token->keyword != RID_LONG))
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
      if (decl_specs)
	decl_specs->any_specifiers_p = true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);

      /* There is no valid C++ program where a non-template type is
	 followed by a "<".  That usually indicates that the user thought
	 that the type was a template.  */
      cp_parser_check_for_invalid_template_id (parser, type, token->location);

      return TYPE_NAME (type);
    }

  /* The type-specifier must be a user-defined type.  */
  if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES))
    {
      bool qualified_p;
      bool global_p;

      /* Don't gobble tokens or issue error messages if this is an
	 optional type-specifier.  */
      if (flags & CP_PARSER_FLAGS_OPTIONAL)
	cp_parser_parse_tentatively (parser);

      /* Look for the optional `::' operator.  */
      global_p
	= (cp_parser_global_scope_opt (parser,
				       /*current_scope_valid_p=*/false)
	   != NULL_TREE);
      /* Look for the nested-name specifier.  */
      qualified_p
	= (cp_parser_nested_name_specifier_opt (parser,
						/*typename_keyword_p=*/false,
						/*check_dependency_p=*/true,
						/*type_p=*/false,
						/*is_declaration=*/false)
	   != NULL_TREE);
      token = cp_lexer_peek_token (parser->lexer);
      /* If we have seen a nested-name-specifier, and the next token
	 is `template', then we are using the template-id production.  */
      if (parser->scope
	  && cp_parser_optional_template_keyword (parser))
	{
	  /* Look for the template-id.  */
	  type = cp_parser_template_id (parser,
					/*template_keyword_p=*/true,
					/*check_dependency_p=*/true,
					/*is_declaration=*/false);
	  /* If the template-id did not name a type, we are out of
	     luck.  */
	  if (TREE_CODE (type) != TYPE_DECL)
	    {
	      cp_parser_error (parser, "expected template-id for type");
	      type = NULL_TREE;
	    }
	}
      /* Otherwise, look for a type-name.  */
      else
	type = cp_parser_type_name (parser);
      /* Keep track of all name-lookups performed in class scopes.  */
      if (type
	  && !global_p
	  && !qualified_p
	  && TREE_CODE (type) == TYPE_DECL
	  && TREE_CODE (DECL_NAME (type)) == IDENTIFIER_NODE)
	maybe_note_name_used_in_class (DECL_NAME (type), type);
      /* If it didn't work out, we don't have a TYPE.  */
      if ((flags & CP_PARSER_FLAGS_OPTIONAL)
	  && !cp_parser_parse_definitely (parser))
	type = NULL_TREE;
      if (type && decl_specs)
	cp_parser_set_decl_spec_type (decl_specs, type,
				      token->location,
				      /*type_definition_p=*/false);
    }

  /* If we didn't get a type-name, issue an error message.  */
  if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type-name");
      return error_mark_node;
    }

  if (type && type != error_mark_node)
    {
      /* See if TYPE is an Objective-C type, and if so, parse and accept any
	 protocol references following it.  Do this before the
	 cp_parser_check_for_invalid_template_id() call, because Objective-C
	 types can be followed by '<...>' which would enclose protocol names
	 rather than template arguments, and so everything is fine.  */
      if (c_dialect_objc () && !parser->scope
	  && (objc_is_id (type) || objc_is_class_name (type)))
	{
	  tree protos = cp_parser_objc_protocol_refs_opt (parser);
	  tree qual_type = objc_get_protocol_qualified_type (type, protos);

	  /* Clobber the "unqualified" type previously entered into
	     DECL_SPECS with the new, improved protocol-qualified version.  */
	  if (decl_specs)
	    decl_specs->type = qual_type;

	  return qual_type;
	}

      /* There is no valid C++ program where a non-template type is
	 followed by a "<".  That usually indicates that the user
	 thought that the type was a template.  */
      cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type),
					       token->location);
    }

  return type;
}

/* Parse a type-name.

   type-name:
     class-name
     enum-name
     typedef-name
     simple-template-id [in c++0x]

   enum-name:
     identifier

   typedef-name:
     identifier

   Returns a TYPE_DECL for the type.  */

static tree
cp_parser_type_name (cp_parser* parser)
{
  tree type_decl;

  /* We can't know yet whether it is a class-name or not.  */
  cp_parser_parse_tentatively (parser);
  /* Try a class-name.  */
  type_decl = cp_parser_class_name (parser,
				    /*typename_keyword_p=*/false,
				    /*template_keyword_p=*/false,
				    none_type,
				    /*check_dependency_p=*/true,
				    /*class_head_p=*/false,
				    /*is_declaration=*/false);
  /* If it's not a class-name, keep looking.  */
  if (!cp_parser_parse_definitely (parser))
    {
      if (cxx_dialect < cxx0x)
	/* It must be a typedef-name or an enum-name.  */
	return cp_parser_nonclass_name (parser);

      cp_parser_parse_tentatively (parser);
      /* It is either a simple-template-id representing an
	 instantiation of an alias template...  */
      type_decl = cp_parser_template_id (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/false,
					 /*is_declaration=*/false);
      /* Note that this must be an instantiation of an alias template
	 because [temp.names]/6 says:

	     A template-id that names an alias template specialization
	     is a type-name.

	 Whereas [temp.names]/7 says:

	     A simple-template-id that names a class template
	     specialization is a class-name.  */
      if (type_decl != NULL_TREE
	  && TREE_CODE (type_decl) == TYPE_DECL
	  && TYPE_DECL_ALIAS_P (type_decl))
	gcc_assert (DECL_TEMPLATE_INSTANTIATION (type_decl));
      else
	cp_parser_simulate_error (parser);

      if (!cp_parser_parse_definitely (parser))
	/* ... Or a typedef-name or an enum-name.  */
	return cp_parser_nonclass_name (parser);
    }

  return type_decl;
}

/* Parse a non-class type-name, that is, either an enum-name or a
   typedef-name.

   enum-name:
     identifier

   typedef-name:
     identifier

   Returns a TYPE_DECL for the type.  */

static tree
cp_parser_nonclass_name (cp_parser* parser)
{
  tree type_decl;
  tree identifier;

  cp_token *token = cp_lexer_peek_token (parser->lexer);
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  /* Look up the type-name.  */
  type_decl = cp_parser_lookup_name_simple (parser, identifier, token->location);

  if (TREE_CODE (type_decl) == USING_DECL)
    {
      if (!DECL_DEPENDENT_P (type_decl))
	type_decl = strip_using_decl (type_decl);
      else if (USING_DECL_TYPENAME_P (type_decl))
	{
	  /* We have found a type introduced by a using
	     declaration at class scope that refers to a dependent
	     type.

	     using typename :: [opt] nested-name-specifier unqualified-id ;
	  */
	  type_decl = make_typename_type (TREE_TYPE (type_decl),
					  DECL_NAME (type_decl),
					  typename_type, tf_error);
	  if (type_decl != error_mark_node)
	    type_decl = TYPE_NAME (type_decl);
	}
    }

  if (TREE_CODE (type_decl) != TYPE_DECL
      && (objc_is_id (identifier) || objc_is_class_name (identifier)))
    {
      /* See if this is an Objective-C type.  */
      tree protos = cp_parser_objc_protocol_refs_opt (parser);
      tree type = objc_get_protocol_qualified_type (identifier, protos);
      if (type)
	type_decl = TYPE_NAME (type);
    }

  /* Issue an error if we did not find a type-name.  */
  if (TREE_CODE (type_decl) != TYPE_DECL
      /* In Objective-C, we have the complication that class names are
	 normally type names and start declarations (eg, the
	 "NSObject" in "NSObject *object;"), but can be used in an
	 Objective-C 2.0 dot-syntax (as in "NSObject.version") which
	 is an expression.  So, a classname followed by a dot is not a
	 valid type-name.  */
      || (objc_is_class_name (TREE_TYPE (type_decl))
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT))
    {
      if (!cp_parser_simulate_error (parser))
	cp_parser_name_lookup_error (parser, identifier, type_decl,
				     NLE_TYPE, token->location);
      return error_mark_node;
    }
  /* Remember that the name was used in the definition of the
     current class so that we can check later to see if the
     meaning would have been different after the class was
     entirely defined.  */
  else if (type_decl != error_mark_node
	   && !parser->scope)
    maybe_note_name_used_in_class (identifier, type_decl);

  return type_decl;
}

/* Parse an elaborated-type-specifier.
Note that the grammar given here incorporates the resolution to
   DR68.

   elaborated-type-specifier:
     class-key :: [opt] nested-name-specifier [opt] identifier
     class-key :: [opt] nested-name-specifier [opt] template [opt] template-id
     enum-key :: [opt] nested-name-specifier [opt] identifier
     typename :: [opt] nested-name-specifier identifier
     typename :: [opt] nested-name-specifier template [opt] template-id

   GNU extension:

   elaborated-type-specifier:
     class-key attributes :: [opt] nested-name-specifier [opt] identifier
     class-key attributes :: [opt] nested-name-specifier [opt]
       template [opt] template-id
     enum attributes :: [opt] nested-name-specifier [opt] identifier

   If IS_FRIEND is TRUE, then this elaborated-type-specifier is being
   declared `friend'.  If IS_DECLARATION is TRUE, then this
   elaborated-type-specifier appears in a decl-specifiers-seq, i.e.,
   something is being declared.

   Returns the TYPE specified.  */

static tree
cp_parser_elaborated_type_specifier (cp_parser* parser,
				     bool is_friend,
				     bool is_declaration)
{
  enum tag_types tag_type;
  tree identifier;
  tree type = NULL_TREE;
  tree attributes = NULL_TREE;
  tree globalscope;
  cp_token *token = NULL;

  /* See if we're looking at the `enum' keyword.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM))
    {
      /* Consume the `enum' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's an enumeration type.  */
      tag_type = enum_type;
      /* Issue a warning if the `struct' or `class' key (for C++0x scoped
	 enums) is used here.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
	  || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
	{
	  pedwarn (input_location, 0, "elaborated-type-specifier "
		   "for a scoped enum must not use the %<%D%> keyword",
		   cp_lexer_peek_token (parser->lexer)->u.value);
	  /* Consume the `struct' or `class' and parse it anyway.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }
  /* Or, it might be `typename'.  */
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
					   RID_TYPENAME))
    {
      /* Consume the `typename' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's a `typename' type.  */
      tag_type = typename_type;
    }
  /* Otherwise it must be a class-key.  */
  else
    {
      tag_type = cp_parser_class_key (parser);
      if (tag_type == none_type)
	return error_mark_node;
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }

  /* Look for the `::' operator.  */
  globalscope = cp_parser_global_scope_opt (parser,
					    /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  if (tag_type == typename_type && !globalscope)
    {
      if (!cp_parser_nested_name_specifier (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    is_declaration))
	return error_mark_node;
    }
  else
    /* Even though `typename' is not present, the proposed resolution
       to Core Issue 180 says that in `class A<T>::B', `B' should be
       considered a type-name, even if `A<T>' is dependent.  */
    cp_parser_nested_name_specifier_opt (parser,
					 /*typename_keyword_p=*/true,
					 /*check_dependency_p=*/true,
					 /*type_p=*/true,
					 is_declaration);
  /* For everything but enumeration types, consider a template-id.
     For an enumeration type, consider only a plain identifier.  */
  if (tag_type != enum_type)
    {
      bool template_p = false;
      tree decl;

      /* Allow the `template' keyword.  */
      template_p = cp_parser_optional_template_keyword (parser);
      /* If we didn't see `template', we don't know if there's a
	 template-id or not.  */
      if (!template_p)
	cp_parser_parse_tentatively (parser);
      /* Parse the template-id.  */
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_template_id (parser, template_p,
				    /*check_dependency_p=*/true,
				    is_declaration);
      /* If we didn't find a template-id, look for an ordinary
	 identifier.  */
      if (!template_p && !cp_parser_parse_definitely (parser))
	;
      /* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is
	 in effect, then we must assume that, upon instantiation, the
	 template will correspond to a class.  */
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	       && tag_type == typename_type)
	type = make_typename_type (parser->scope, decl,
				   typename_type,
				   /*complain=*/tf_error);
      /* If the `typename' keyword is in effect and DECL is not a type
	 decl, then type is non existent.  */
      else if (tag_type == typename_type && TREE_CODE (decl) != TYPE_DECL)
	;
      else if (TREE_CODE (decl) == TYPE_DECL)
	type = check_elaborated_type_specifier (tag_type, decl,
						/*allow_template_p=*/true);
      else if (decl == error_mark_node)
	type = error_mark_node;
    }

  /* No type yet: parse a plain identifier and resolve it.  */
  if (!type)
    {
      token = cp_lexer_peek_token (parser->lexer);
      identifier = cp_parser_identifier (parser);

      if (identifier == error_mark_node)
	{
	  parser->scope = NULL_TREE;
	  return error_mark_node;
	}

      /* For a `typename', we needn't call xref_tag.  */
      if (tag_type == typename_type
	  && TREE_CODE (parser->scope) != NAMESPACE_DECL)
	return cp_parser_make_typename_type (parser, parser->scope,
					     identifier,
					     token->location);
      /* Look up a qualified name in the usual way.  */
      if (parser->scope)
	{
	  tree decl;
	  tree ambiguous_decls;

	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					/*check_dependency=*/true,
					&ambiguous_decls,
					token->location);

	  /* If the lookup was ambiguous, an error will already have been
	     issued.  */
	  if (ambiguous_decls)
	    return error_mark_node;

	  /* If we are parsing friend declaration, DECL may be a
	     TEMPLATE_DECL tree node here.  However, we need to check
	     whether this TEMPLATE_DECL results in valid code.  Consider
	     the following example:

	       namespace N {
		 template <class T> class C {};
	       }
	       class X {
		 template <class T> friend class N::C; // #1, valid code
	       };
	       template <class T> class Y {
		 friend class N::C;		       // #2, invalid code
	       };

	     For both case #1 and #2, we arrive at a TEMPLATE_DECL after
	     name lookup of `N::C'.  We see that friend declaration must
	     be template for the code to be valid.  Note that
	     processing_template_decl does not work here since it is
	     always 1 for the above two cases.  */

	  decl = (cp_parser_maybe_treat_template_as_class
		  (decl, /*tag_name_p=*/is_friend
			 && parser->num_template_parameter_lists));

	  if (TREE_CODE (decl) != TYPE_DECL)
	    {
	      cp_parser_diagnose_invalid_type_name (parser,
						    parser->scope,
						    identifier,
						    token->location);
	      return error_mark_node;
	    }

	  if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE)
	    {
	      bool allow_template = (parser->num_template_parameter_lists
				     || DECL_SELF_REFERENCE_P (decl));
	      type = check_elaborated_type_specifier (tag_type, decl,
						      allow_template);

	      if (type == error_mark_node)
		return error_mark_node;
	    }

	  /* Forward declarations of nested types, such as

	       class C1::C2;
	       class C1::C2::C3;

	     are invalid unless all components preceding the final '::'
	     are complete.  If all enclosing types are complete, these
	     declarations become merely pointless.

	     Invalid forward declarations of nested types are errors
	     caught elsewhere in parsing.  Those that are pointless arrive
	     here.  */

	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      && !is_friend && !processing_explicit_instantiation)
	    warning (0, "declaration %qD does not declare anything", decl);

	  type = TREE_TYPE (decl);
	}
      else
	{
	  /* An elaborated-type-specifier sometimes introduces a new
	     type and sometimes names an existing type.  Normally, the
	     rule is that it introduces a new type only if there is not
	     an existing type of the same name already in scope.  For
	     example, given:

	       struct S {};
	       void f() { struct S s; }

	     the `struct S' in the body of `f' is the same `struct S'
	     as in the global scope; the existing definition is used.
	     However, if there were no global declaration, this would
	     introduce a new local class named `S'.

	     An exception to this rule applies to the following code:

	       namespace N { struct S; }

	     Here, the elaborated-type-specifier names a new type
	     unconditionally; even if there is already an `S' in the
	     containing scope this declaration names a new type.
	     This exception only applies if the elaborated-type-specifier
	     forms the complete declaration:

	       [class.name]

	       A declaration consisting solely of `class-key identifier ;'
	       is either a redeclaration of the name in the current scope
	       or a forward declaration of the identifier as a class name.
	       It introduces the name into the current scope.

	     We are in this situation precisely when the next token is a `;'.

	     An exception to the exception is that a `friend' declaration
	     does *not* name a new type; i.e., given:

	       struct S { friend struct T; };

	     `T' is not a new type in the scope of `S'.

	     Also, `new struct S' or `sizeof (struct S)' never results in the
	     definition of a new type; a new type can only be declared in a
	     declaration context.  */

	  tag_scope ts;
	  bool template_p;

	  if (is_friend)
	    /* Friends have special name lookup rules.  */
	    ts = ts_within_enclosing_non_class;
	  else if (is_declaration
		   && cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
	    /* This is a `class-key identifier ;' */
	    ts = ts_current;
	  else
	    ts = ts_global;

	  template_p =
	    (parser->num_template_parameter_lists
	     && (cp_parser_next_token_starts_class_definition_p (parser)
		 || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)));
	  /* An unqualified name was used to reference this type, so
	     there were no qualifying templates.  */
	  if (!cp_parser_check_template_parameters (parser,
						    /*num_templates=*/0,
						    token->location,
						    /*declarator=*/NULL))
	    return error_mark_node;
	  type = xref_tag (tag_type, identifier, ts, template_p);
	}
    }

  if (type == error_mark_node)
    return error_mark_node;

  /* Allow attributes on forward declarations of classes.  */
  if (attributes)
    {
      if (TREE_CODE (type) == TYPENAME_TYPE)
	warning (OPT_Wattributes,
		 "attributes ignored on uninstantiated type");
      else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type)
	       && ! processing_explicit_instantiation)
	warning (OPT_Wattributes,
		 "attributes ignored on template instantiation");
      else if (is_declaration && cp_parser_declares_only_class_p (parser))
	cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
      else
	warning (OPT_Wattributes,
		 "attributes ignored on elaborated-type-specifier that is not a forward declaration");
    }

  if (tag_type != enum_type)
    {
      /* Indicate whether this class was declared as a `class' or as a
	 `struct'.  */
      if (TREE_CODE (type) == RECORD_TYPE)
	CLASSTYPE_DECLARED_CLASS (type) = (tag_type == class_type);
      cp_parser_check_class_key (tag_type, type);
    }

  /* A "<" cannot follow an elaborated type specifier.  If that
     happens, the user was probably trying to form a template-id.  */
  cp_parser_check_for_invalid_template_id (parser, type, token->location);

  return type;
}

/* Parse an enum-specifier.

   enum-specifier:
     enum-head { enumerator-list [opt] }
     enum-head { enumerator-list , } [C++0x]

   enum-head:
     enum-key identifier [opt] enum-base [opt]
     enum-key nested-name-specifier identifier enum-base [opt]

   enum-key:
     enum
     enum class [C++0x]
     enum struct [C++0x]

   enum-base:   [C++0x]
     : type-specifier-seq

   opaque-enum-specifier:
     enum-key identifier enum-base [opt] ;

   GNU Extensions:
     enum-key attributes[opt] identifier [opt] enum-base [opt]
       { enumerator-list [opt] }attributes[opt]
     enum-key attributes[opt] identifier [opt] enum-base [opt]
       { enumerator-list, }attributes[opt] [C++0x]

   Returns an ENUM_TYPE representing the enumeration, or NULL_TREE
   if the token stream isn't an enum-specifier after all.
*/ static tree cp_parser_enum_specifier (cp_parser* parser) { tree identifier; tree type = NULL_TREE; tree prev_scope; tree nested_name_specifier = NULL_TREE; tree attributes; bool scoped_enum_p = false; bool has_underlying_type = false; bool nested_being_defined = false; bool new_value_list = false; bool is_new_type = false; bool is_anonymous = false; tree underlying_type = NULL_TREE; cp_token *type_start_token = NULL; bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; parser->colon_corrects_to_scope_p = false; /* Parse tentatively so that we can back up if we don't find a enum-specifier. */ cp_parser_parse_tentatively (parser); /* Caller guarantees that the current token is 'enum', an identifier possibly follows, and the token after that is an opening brace. If we don't have an identifier, fabricate an anonymous name for the enumeration being defined. */ cp_lexer_consume_token (parser->lexer); /* Parse the "class" or "struct", which indicates a scoped enumeration type in C++0x. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS) || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT)) { if (cxx_dialect < cxx0x) maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS); /* Consume the `struct' or `class' token. */ cp_lexer_consume_token (parser->lexer); scoped_enum_p = true; } attributes = cp_parser_attributes_opt (parser); /* Clear the qualification. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; /* Figure out in what scope the declaration is being placed. 
*/ prev_scope = current_scope (); type_start_token = cp_lexer_peek_token (parser->lexer); push_deferring_access_checks (dk_no_check); nested_name_specifier = cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/true, /*check_dependency_p=*/false, /*type_p=*/false, /*is_declaration=*/false); if (nested_name_specifier) { tree name; identifier = cp_parser_identifier (parser); name = cp_parser_lookup_name (parser, identifier, enum_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, input_location); if (name) { type = TREE_TYPE (name); if (TREE_CODE (type) == TYPENAME_TYPE) { /* Are template enums allowed in ISO? */ if (template_parm_scope_p ()) pedwarn (type_start_token->location, OPT_pedantic, "%qD is an enumeration template", name); /* ignore a typename reference, for it will be solved by name in start_enum. */ type = NULL_TREE; } } else error_at (type_start_token->location, "%qD is not an enumerator-name", identifier); } else { if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) identifier = cp_parser_identifier (parser); else { identifier = make_anon_name (); is_anonymous = true; } } pop_deferring_access_checks (); /* Check for the `:' that denotes a specified underlying type in C++0x. Note that a ':' could also indicate a bitfield width, however. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { cp_decl_specifier_seq type_specifiers; /* Consume the `:'. */ cp_lexer_consume_token (parser->lexer); /* Parse the type-specifier-seq. */ cp_parser_type_specifier_seq (parser, /*is_declaration=*/false, /*is_trailing_return=*/false, &type_specifiers); /* At this point this is surely not elaborated type specifier. */ if (!cp_parser_parse_definitely (parser)) return NULL_TREE; if (cxx_dialect < cxx0x) maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS); has_underlying_type = true; /* If that didn't work, stop. 
*/ if (type_specifiers.type != error_mark_node) { underlying_type = grokdeclarator (NULL, &type_specifiers, TYPENAME, /*initialized=*/0, NULL); if (underlying_type == error_mark_node) underlying_type = NULL_TREE; } } /* Look for the `{' but don't consume it yet. */ if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { if (cxx_dialect < cxx0x || (!scoped_enum_p && !underlying_type)) { cp_parser_error (parser, "expected %<{%>"); if (has_underlying_type) { type = NULL_TREE; goto out; } } /* An opaque-enum-specifier must have a ';' here. */ if ((scoped_enum_p || underlying_type) && cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { cp_parser_error (parser, "expected %<;%> or %<{%>"); if (has_underlying_type) { type = NULL_TREE; goto out; } } } if (!has_underlying_type && !cp_parser_parse_definitely (parser)) return NULL_TREE; if (nested_name_specifier) { if (CLASS_TYPE_P (nested_name_specifier)) { nested_being_defined = TYPE_BEING_DEFINED (nested_name_specifier); TYPE_BEING_DEFINED (nested_name_specifier) = 1; push_scope (nested_name_specifier); } else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL) { push_nested_namespace (nested_name_specifier); } } /* Issue an error message if type-definitions are forbidden here. */ if (!cp_parser_check_type_definition (parser)) type = error_mark_node; else /* Create the new type. We do this before consuming the opening brace so the enum will be recorded as being on the line of its tag (or the 'enum' keyword, if there is no tag). */ type = start_enum (identifier, type, underlying_type, scoped_enum_p, &is_new_type); /* If the next token is not '{' it is an opaque-enum-specifier or an elaborated-type-specifier. 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { timevar_push (TV_PARSE_ENUM); if (nested_name_specifier) { /* The following catches invalid code such as: enum class S<int>::E { A, B, C }; */ if (!processing_specialization && CLASS_TYPE_P (nested_name_specifier) && CLASSTYPE_USE_TEMPLATE (nested_name_specifier)) error_at (type_start_token->location, "cannot add an enumerator " "list to a template instantiation"); /* If that scope does not contain the scope in which the class was originally declared, the program is invalid. */ if (prev_scope && !is_ancestor (prev_scope, nested_name_specifier)) { if (at_namespace_scope_p ()) error_at (type_start_token->location, "declaration of %qD in namespace %qD which does not " "enclose %qD", type, prev_scope, nested_name_specifier); else error_at (type_start_token->location, "declaration of %qD in %qD which does not enclose %qD", type, prev_scope, nested_name_specifier); type = error_mark_node; } } if (scoped_enum_p) begin_scope (sk_scoped_enum, type); /* Consume the opening brace. */ cp_lexer_consume_token (parser->lexer); if (type == error_mark_node) ; /* Nothing to add */ else if (OPAQUE_ENUM_P (type) || (cxx_dialect > cxx98 && processing_specialization)) { new_value_list = true; SET_OPAQUE_ENUM_P (type, false); DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location; } else { error_at (type_start_token->location, "multiple definition of %q#T", type); error_at (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)), "previous definition here"); type = error_mark_node; } if (type == error_mark_node) cp_parser_skip_to_end_of_block_or_statement (parser); /* If the next token is not '}', then there are some enumerators. */ else if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE)) cp_parser_enumerator_list (parser, type); /* Consume the final '}'. 
*/ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); if (scoped_enum_p) finish_scope (); timevar_pop (TV_PARSE_ENUM); } else { /* If a ';' follows, then it is an opaque-enum-specifier and additional restrictions apply. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) { if (is_anonymous) error_at (type_start_token->location, "opaque-enum-specifier without name"); else if (nested_name_specifier) error_at (type_start_token->location, "opaque-enum-specifier must use a simple identifier"); } } /* Look for trailing attributes to apply to this enumeration, and apply them if appropriate. */ if (cp_parser_allow_gnu_extensions_p (parser)) { tree trailing_attr = cp_parser_attributes_opt (parser); trailing_attr = chainon (trailing_attr, attributes); cplus_decl_attributes (&type, trailing_attr, (int) ATTR_FLAG_TYPE_IN_PLACE); } /* Finish up the enumeration. */ if (type != error_mark_node) { if (new_value_list) finish_enum_value_list (type); if (is_new_type) finish_enum (type); } if (nested_name_specifier) { if (CLASS_TYPE_P (nested_name_specifier)) { TYPE_BEING_DEFINED (nested_name_specifier) = nested_being_defined; pop_scope (nested_name_specifier); } else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL) { pop_nested_namespace (nested_name_specifier); } } out: parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; return type; } /* Parse an enumerator-list. The enumerators all have the indicated TYPE. enumerator-list: enumerator-definition enumerator-list , enumerator-definition */ static void cp_parser_enumerator_list (cp_parser* parser, tree type) { while (true) { /* Parse an enumerator-definition. */ cp_parser_enumerator_definition (parser, type); /* If the next token is not a ',', we've reached the end of the list. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Otherwise, consume the `,' and keep going. */ cp_lexer_consume_token (parser->lexer); /* If the next token is a `}', there is a trailing comma. 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)) { if (cxx_dialect < cxx0x && !in_system_header) pedwarn (input_location, OPT_pedantic, "comma at end of enumerator list"); break; } } } /* Parse an enumerator-definition. The enumerator has the indicated TYPE. enumerator-definition: enumerator enumerator = constant-expression enumerator: identifier */ static void cp_parser_enumerator_definition (cp_parser* parser, tree type) { tree identifier; tree value; location_t loc; /* Save the input location because we are interested in the location of the identifier and not the location of the explicit value. */ loc = cp_lexer_peek_token (parser->lexer)->location; /* Look for the identifier. */ identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return; /* If the next token is an '=', then there is an explicit value. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { /* Consume the `=' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the value. */ value = cp_parser_constant_expression (parser, /*allow_non_constant_p=*/false, NULL); } else value = NULL_TREE; /* If we are processing a template, make sure the initializer of the enumerator doesn't contain any bare template parameter pack. */ if (check_for_bare_parameter_packs (value)) value = error_mark_node; /* integral_constant_value will pull out this expression, so make sure it's folded as appropriate. */ value = fold_non_dependent_expr (value); /* Create the enumerator. */ build_enumerator (identifier, value, type, loc); } /* Parse a namespace-name. namespace-name: original-namespace-name namespace-alias Returns the NAMESPACE_DECL for the namespace. */ static tree cp_parser_namespace_name (cp_parser* parser) { tree identifier; tree namespace_decl; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Get the name of the namespace. 
*/
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  /* Look up the identifier in the currently active scope.  Look only
     for namespaces, due to:

       [basic.lookup.udir]

       When looking up a namespace-name in a using-directive or alias
       definition, only namespace names are considered.

     And:

       [basic.lookup.qual]

       During the lookup of a name preceding the :: scope resolution
       operator, object, function, and enumerator names are ignored.

     (Note that cp_parser_qualifying_entity only calls this
      function if the token after the name is the scope resolution
      operator.)  */
  namespace_decl = cp_parser_lookup_name (parser, identifier,
					  none_type,
					  /*is_template=*/false,
					  /*is_namespace=*/true,
					  /*check_dependency=*/true,
					  /*ambiguous_decls=*/NULL,
					  token->location);
  /* If it's not a namespace, issue an error.  The location-specific
     diagnostic is suppressed during a tentative parse; the generic
     cp_parser_error still records that a parse error occurred.  */
  if (namespace_decl == error_mark_node
      || TREE_CODE (namespace_decl) != NAMESPACE_DECL)
    {
      if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	error_at (token->location, "%qD is not a namespace-name", identifier);
      cp_parser_error (parser, "expected namespace-name");
      namespace_decl = error_mark_node;
    }

  return namespace_decl;
}

/* Parse a namespace-definition.

   namespace-definition:
     named-namespace-definition
     unnamed-namespace-definition

   named-namespace-definition:
     original-namespace-definition
     extension-namespace-definition

   original-namespace-definition:
     namespace identifier { namespace-body }

   extension-namespace-definition:
     namespace original-namespace-name { namespace-body }

   unnamed-namespace-definition:
     namespace { namespace-body }  */

static void
cp_parser_namespace_definition (cp_parser* parser)
{
  tree identifier, attribs;
  bool has_visibility;
  bool is_inline;

  /* An optional leading `inline' makes this a C++11 inline namespace;
     warn in pre-C++11 dialects via maybe_warn_cpp0x.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_INLINE))
    {
      maybe_warn_cpp0x (CPP0X_INLINE_NAMESPACES);
      is_inline = true;
      cp_lexer_consume_token (parser->lexer);
    }
  else
    is_inline = false;

  /* Look for the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* Get the name of the namespace.  We do not attempt to distinguish
     between an original-namespace-definition and an
     extension-namespace-definition at this point.  The semantic
     analysis routines are responsible for that.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    identifier = cp_parser_identifier (parser);
  else
    identifier = NULL_TREE;

  /* Parse any specified attributes.  */
  attribs = cp_parser_attributes_opt (parser);

  /* Look for the `{' to start the namespace.  */
  cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
  /* Start the namespace.  */
  push_namespace (identifier);

  /* "inline namespace" is equivalent to a stub namespace definition
     followed by a strong using directive.  */
  if (is_inline)
    {
      tree name_space = current_namespace;
      /* Set up namespace association.  */
      DECL_NAMESPACE_ASSOCIATIONS (name_space)
	= tree_cons (CP_DECL_CONTEXT (name_space), NULL_TREE,
		     DECL_NAMESPACE_ASSOCIATIONS (name_space));
      /* Import the contents of the inline namespace.  */
      pop_namespace ();
      do_using_directive (name_space);
      push_namespace (identifier);
    }

  has_visibility = handle_namespace_attrs (current_namespace, attribs);

  /* Parse the body of the namespace.  */
  cp_parser_namespace_body (parser);

  if (has_visibility)
    pop_visibility (1);

  /* Finish the namespace.  */
  pop_namespace ();
  /* Look for the final `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
}

/* Parse a namespace-body.

   namespace-body:
     declaration-seq [opt]  */

static void
cp_parser_namespace_body (cp_parser* parser)
{
  cp_parser_declaration_seq_opt (parser);
}

/* Parse a namespace-alias-definition.

   namespace-alias-definition:
     namespace identifier = qualified-namespace-specifier ;  */

static void
cp_parser_namespace_alias_definition (cp_parser* parser)
{
  tree identifier;
  tree namespace_specifier;

  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Look for the `namespace' keyword.
*/
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);
  /* Look for the identifier.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return;
  /* Look for the `=' token.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* A `{' here means the user wrote a namespace-definition in a
	 context where only an alias-definition is allowed; diagnose
	 and skip the whole braced body for error recovery.  */
      error_at (token->location, "%<namespace%> definition is not allowed here");
      /* Skip the definition.  */
      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_skip_to_closing_brace (parser))
	cp_lexer_consume_token (parser->lexer);
      return;
    }
  cp_parser_require (parser, CPP_EQ, RT_EQ);
  /* Look for the qualified-namespace-specifier.  */
  namespace_specifier
    = cp_parser_qualified_namespace_specifier (parser);
  /* Look for the `;' token.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Register the alias in the symbol table.  */
  do_namespace_alias (identifier, namespace_specifier);
}

/* Parse a qualified-namespace-specifier.

   qualified-namespace-specifier:
     :: [opt] nested-name-specifier [opt] namespace-name

   Returns a NAMESPACE_DECL corresponding to the specified
   namespace.  */

static tree
cp_parser_qualified_namespace_specifier (cp_parser* parser)
{
  /* Look for the optional `::'.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);

  /* Look for the optional nested-name-specifier.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);

  return cp_parser_namespace_name (parser);
}

/* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an
   access declaration.
   using-declaration:
     using typename [opt] :: [opt] nested-name-specifier unqualified-id ;
     using :: unqualified-id ;

   access-declaration:
     qualified-id ;

   Returns true if the declaration was parsed (possibly with errors);
   false if, in access-declaration mode, the tentative parse failed or
   a bare parameter pack was found.  */

static bool
cp_parser_using_declaration (cp_parser* parser,
			     bool access_declaration_p)
{
  cp_token *token;
  bool typename_p = false;
  bool global_scope_p;
  tree decl;
  tree identifier;
  tree qscope;
  int oldcount = errorcount;
  cp_token *diag_token = NULL;

  if (access_declaration_p)
    {
      /* Access declarations have no leading `using'; parse tentatively
	 so we can back out if this is not one after all.  */
      diag_token = cp_lexer_peek_token (parser->lexer);
      cp_parser_parse_tentatively (parser);
    }
  else
    {
      /* Look for the `using' keyword.  */
      cp_parser_require_keyword (parser, RID_USING, RT_USING);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's `typename'.  */
      if (token->keyword == RID_TYPENAME)
	{
	  /* Remember that we've seen it.  */
	  typename_p = true;
	  /* Consume the `typename' token.  */
	  cp_lexer_consume_token (parser->lexer);
	}
    }

  /* Look for the optional global scope qualification.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);

  /* If we saw `typename', or didn't see `::', then there must be a
     nested-name-specifier present.  */
  if (typename_p || !global_scope_p)
    qscope = cp_parser_nested_name_specifier (parser, typename_p,
					      /*check_dependency_p=*/true,
					      /*type_p=*/false,
					      /*is_declaration=*/true);
  /* Otherwise, we could be in either of the two productions.  In that
     case, treat the nested-name-specifier as optional.  */
  else
    qscope = cp_parser_nested_name_specifier_opt (parser,
						  /*typename_keyword_p=*/false,
						  /*check_dependency_p=*/true,
						  /*type_p=*/false,
						  /*is_declaration=*/true);
  if (!qscope)
    qscope = global_namespace;

  if (access_declaration_p && cp_parser_error_occurred (parser))
    /* Something has already gone wrong; there's no need to parse
       further.  Since an error has occurred, the return value of
       cp_parser_parse_definitely will be false, as required.  */
    return cp_parser_parse_definitely (parser);

  token = cp_lexer_peek_token (parser->lexer);
  /* Parse the unqualified-id.  */
  identifier = cp_parser_unqualified_id (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*declarator_p=*/true,
					 /*optional_p=*/false);

  if (access_declaration_p)
    {
      /* An access-declaration must end in `;'; otherwise simulate an
	 error so the tentative parse is rejected.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_simulate_error (parser);
      if (!cp_parser_parse_definitely (parser))
	return false;
    }

  /* The function we call to handle a using-declaration is different
     depending on what scope we are in.  */
  if (qscope == error_mark_node || identifier == error_mark_node)
    ;
  else if (TREE_CODE (identifier) != IDENTIFIER_NODE
	   && TREE_CODE (identifier) != BIT_NOT_EXPR)
    /* [namespace.udecl]

       A using declaration shall not name a template-id.  */
    error_at (token->location,
	      "a template-id may not appear in a using-declaration");
  else
    {
      if (at_class_scope_p ())
	{
	  /* Create the USING_DECL.  */
	  decl = do_class_using_decl (parser->scope, identifier);

	  if (decl && typename_p)
	    USING_DECL_TYPENAME_P (decl) = 1;

	  if (check_for_bare_parameter_packs (decl))
	    return false;
	  else
	    /* Add it to the list of members in this class.  */
	    finish_member_declaration (decl);
	}
      else
	{
	  decl = cp_parser_lookup_name_simple (parser,
					       identifier,
					       token->location);
	  if (decl == error_mark_node)
	    cp_parser_name_lookup_error (parser, identifier,
					 decl, NLE_NULL,
					 token->location);
	  else if (check_for_bare_parameter_packs (decl))
	    return false;
	  else if (!at_namespace_scope_p ())
	    do_local_using_decl (decl, qscope, identifier);
	  else
	    do_toplevel_using_decl (decl, qscope, identifier);
	}
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Only warn about the deprecated access-declaration form if no new
     errors were emitted while parsing it.  */
  if (access_declaration_p && errorcount == oldcount)
    warning_at (diag_token->location, OPT_Wdeprecated,
		"access declarations are deprecated "
		"in favour of using-declarations; "
		"suggestion: add the %<using%> keyword");

  return true;
}

/* Parse an alias-declaration.
   alias-declaration:
     using identifier attribute-specifier-seq [opt] = type-id

   Returns the new TYPE_DECL (or its TEMPLATE_DECL for an alias
   template), or error_mark_node on failure.  */

static tree
cp_parser_alias_declaration (cp_parser* parser)
{
  tree id, type, decl, pushed_scope = NULL_TREE, attributes;
  location_t id_location;
  cp_declarator *declarator;
  cp_decl_specifier_seq decl_specs;
  bool member_p;
  const char *saved_message = NULL;

  /* Look for the `using' keyword.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  id_location = cp_lexer_peek_token (parser->lexer)->location;
  id = cp_parser_identifier (parser);
  if (id == error_mark_node)
    return error_mark_node;

  attributes = cp_parser_attributes_opt (parser);
  if (attributes == error_mark_node)
    return error_mark_node;

  cp_parser_require (parser, CPP_EQ, RT_EQ);

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* Now we are going to parse the type-id of the declaration.  */

  /*
    [dcl.type]/3 says:

	"A type-specifier-seq shall not define a class or enumeration
	 unless it appears in the type-id of an alias-declaration (7.1.3) that
	 is not the declaration of a template-declaration."

    In other words, if we currently are in an alias template, the
    type-id should not define a type.

    So let's set parser->type_definition_forbidden_message in that
    case; cp_parser_check_type_definition (called by
    cp_parser_class_specifier) will then emit an error if a type is
    defined in the type-id.  */
  if (parser->num_template_parameter_lists)
    {
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message =
	G_("types may not be defined in alias template declarations");
    }

  type = cp_parser_type_id (parser);

  /* Restore the error message if need be.  */
  if (parser->num_template_parameter_lists)
    parser->type_definition_forbidden_message = saved_message;

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* A typedef-name can also be introduced by an alias-declaration.
     The identifier following the using keyword becomes a typedef-name. It has
     the same semantics as if it were introduced by the typedef
     specifier. In particular, it does not define a new type and it shall
     not appear in the type-id.  */

  /* Build a synthetic `typedef' decl-specifier-seq (also marked as an
     alias) and a plain id declarator, then declare it like a typedef.  */
  clear_decl_specs (&decl_specs);
  decl_specs.type = type;
  decl_specs.attributes = attributes;
  ++decl_specs.specs[(int) ds_typedef];
  ++decl_specs.specs[(int) ds_alias];

  declarator = make_id_declarator (NULL_TREE, id, sfk_none);
  declarator->id_loc = id_location;

  member_p = at_class_scope_p ();
  if (member_p)
    decl = grokfield (declarator, &decl_specs, NULL_TREE, false,
		      NULL_TREE, attributes);
  else
    decl = start_decl (declarator, &decl_specs, 0,
		       attributes, NULL_TREE, &pushed_scope);
  if (decl == error_mark_node)
    return decl;

  cp_finish_decl (decl, NULL_TREE, 0, NULL_TREE, 0);

  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If decl is a template, return its TEMPLATE_DECL so that it gets
     added into the symbol table; otherwise, return the TYPE_DECL.  */
  if (DECL_LANG_SPECIFIC (decl)
      && DECL_TEMPLATE_INFO (decl)
      && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)))
    {
      decl = DECL_TI_TEMPLATE (decl);
      if (member_p)
	check_member_template (decl);
    }

  return decl;
}

/* Parse a using-directive.

   using-directive:
     using namespace :: [opt] nested-name-specifier [opt]
       namespace-name ;  */

static void
cp_parser_using_directive (cp_parser* parser)
{
  tree namespace_decl;
  tree attribs;

  /* Look for the `using' keyword.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  /* And the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);
  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* And the optional nested-name-specifier.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);
  /* Get the namespace being used.
*/
  namespace_decl = cp_parser_namespace_name (parser);
  /* And any specified attributes.  */
  attribs = cp_parser_attributes_opt (parser);
  /* Update the symbol table.  */
  parse_using_directive (namespace_decl, attribs);
  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}

/* Parse an asm-definition.

   asm-definition:
     asm ( string-literal ) ;

   GNU Extension:

   asm-definition:
     asm volatile [opt] ( string-literal ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt]
			  : asm-operand-list [opt] ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt]
			  : asm-operand-list [opt]
			  : asm-clobber-list [opt] ) ;
     asm volatile [opt] goto ( string-literal : : asm-operand-list [opt]
			       : asm-clobber-list [opt]
			       : asm-goto-list ) ;  */

static void
cp_parser_asm_definition (cp_parser* parser)
{
  tree string;
  tree outputs = NULL_TREE;
  tree inputs = NULL_TREE;
  tree clobbers = NULL_TREE;
  tree labels = NULL_TREE;
  tree asm_stmt;
  bool volatile_p = false;
  bool extended_p = false;
  bool invalid_inputs_p = false;
  bool invalid_outputs_p = false;
  bool goto_p = false;
  required_token missing = RT_NONE;

  /* Look for the `asm' keyword.  */
  cp_parser_require_keyword (parser, RID_ASM, RT_ASM);
  /* See if the next token is `volatile'.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE))
    {
      /* Remember that we saw the `volatile' keyword.  */
      volatile_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* `asm goto' is only meaningful inside a function body.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_GOTO))
    {
      /* Remember that we saw the `goto' keyword.  */
      goto_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the opening `('.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return;
  /* Look for the string.  */
  string = cp_parser_string_literal (parser, false, false);
  if (string == error_mark_node)
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
					     /*consume_paren=*/true);
      return;
    }

  /* If we're allowing GNU extensions, check for the extended assembly
     syntax.  Unfortunately, the `:' tokens need not be separated by
     a space in C, and so, for compatibility, we tolerate that here
     too.  Doing that means that we have to treat the `::' operator as
     two `:' tokens.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && (cp_lexer_next_token_is (parser->lexer, CPP_COLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)))
    {
      bool inputs_p = false;
      bool clobbers_p = false;
      bool labels_p = false;

      /* The extended syntax was used.  */
      extended_p = true;

      /* Look for outputs.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the output-operands.  */
	  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE)
	      && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
	      && !goto_p)
	    outputs = cp_parser_asm_operand_list (parser);

	    if (outputs == error_mark_node)
	      invalid_outputs_p = true;
	}
      /* If the next token is `::', there are no outputs, and the
	 next token is the beginning of the inputs.  */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The inputs are coming next.  */
	inputs_p = true;

      /* Look for inputs.  */
      if (inputs_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the input-operands.  */
	  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE)
	      && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	    inputs = cp_parser_asm_operand_list (parser);

	    if (inputs == error_mark_node)
	      invalid_inputs_p = true;
	}
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The clobbers are coming next.  */
	clobbers_p = true;

      /* Look for clobbers.  */
      if (clobbers_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  clobbers_p = true;
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the clobbers.  */
	  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	    clobbers = cp_parser_asm_clobber_list (parser);
	}
      else if (goto_p
	       && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The labels are coming next.  */
	labels_p = true;

      /* Look for labels.  */
      if (labels_p
	  || (goto_p && cp_lexer_next_token_is (parser->lexer, CPP_COLON)))
	{
	  labels_p = true;
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the labels.  */
	  labels = cp_parser_asm_label_list (parser);
	}

      /* `asm goto' requires a label list; remember which separator is
	 missing so the diagnostic below names the right token.  */
      if (goto_p && !labels_p)
	missing = clobbers_p ? RT_COLON : RT_COLON_SCOPE;
    }
  else if (goto_p)
    missing = RT_COLON_SCOPE;

  /* Look for the closing `)'.  */
  if (!cp_parser_require (parser, missing ? CPP_COLON : CPP_CLOSE_PAREN,
			  missing ? missing : RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, true, false,
					   /*consume_paren=*/true);
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (!invalid_inputs_p && !invalid_outputs_p)
    {
      /* Create the ASM_EXPR.  */
      if (parser->in_function_body)
	{
	  asm_stmt = finish_asm_stmt (volatile_p, string, outputs,
				      inputs, clobbers, labels);
	  /* If the extended syntax was not used, mark the ASM_EXPR.  */
	  if (!extended_p)
	    {
	      tree temp = asm_stmt;
	      if (TREE_CODE (temp) == CLEANUP_POINT_EXPR)
		temp = TREE_OPERAND (temp, 0);

	      ASM_INPUT_P (temp) = 1;
	    }
	}
      else
	/* A file-scope asm is recorded with the callgraph, not expanded
	   as a statement.  */
	cgraph_add_asm_node (string);
    }
}

/* Declarators [gram.dcl.decl] */

/* Parse an init-declarator.

   init-declarator:
     declarator initializer [opt]

   GNU Extension:

   init-declarator:
     declarator asm-specification [opt] attributes [opt] initializer [opt]

   function-definition:
     decl-specifier-seq [opt] declarator ctor-initializer [opt]
       function-body
     decl-specifier-seq [opt] declarator function-try-block

   GNU Extension:

   function-definition:
     __extension__ function-definition

   TM Extension:

   function-definition:
     decl-specifier-seq [opt] declarator function-transaction-block

   The DECL_SPECIFIERS apply to this declarator.  Returns a
   representation of the entity declared.  If MEMBER_P is TRUE, then
   this declarator appears in a class scope.  The new DECL created by
   this declarator is returned.

   The CHECKS are access checks that should be performed once we know
   what entity is being declared (and, therefore, what classes have
   befriended it).

   If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and
   for a function-definition here as well.  If the declarator is a
   declarator for a function-definition, *FUNCTION_DEFINITION_P will
   be TRUE upon return.  By that point, the function-definition will
   have been completely parsed.

   FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P
   is FALSE.

   If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
   parsed declaration if it is an uninitialized single declarator not followed
   by a `;', or to error_mark_node otherwise. Either way, the trailing `;', if
   present, will not be consumed.  If returned, this declarator will be
   created with SD_INITIALIZED but will not call cp_finish_decl.
*/

static tree
cp_parser_init_declarator (cp_parser* parser,
			   cp_decl_specifier_seq *decl_specifiers,
			   VEC (deferred_access_check,gc)* checks,
			   bool function_definition_allowed_p,
			   bool member_p,
			   int declares_class_or_enum,
			   bool* function_definition_p,
			   tree* maybe_range_for_decl)
{
  cp_token *token = NULL, *asm_spec_start_token = NULL,
           *attributes_start_token = NULL;
  cp_declarator *declarator;
  tree prefix_attributes;
  tree attributes;
  tree asm_specification;
  tree initializer;
  tree decl = NULL_TREE;
  tree scope;
  int is_initialized;
  /* Only valid if IS_INITIALIZED is true.  In that case, CPP_EQ if
     initialized with "= ..", CPP_OPEN_PAREN if initialized with
     "(...)".  */
  enum cpp_ttype initialization_kind;
  bool is_direct_init = false;
  bool is_non_constant_init;
  int ctor_dtor_or_conv_p;
  bool friend_p;
  tree pushed_scope = NULL_TREE;
  bool range_for_decl_p = false;

  /* Gather the attributes that were provided with the
     decl-specifiers.  */
  prefix_attributes = decl_specifiers->attributes;

  /* Assume that this is not the declarator for a function
     definition.  */
  if (function_definition_p)
    *function_definition_p = false;

  /* Defer access checks while parsing the declarator; we cannot know
     what names are accessible until we know what is being
     declared.  */
  resume_deferring_access_checks ();

  /* Parse the declarator.  */
  token = cp_lexer_peek_token (parser->lexer);
  declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
			    &ctor_dtor_or_conv_p,
			    /*parenthesized_p=*/NULL,
			    member_p);
  /* Gather up the deferred checks.  */
  stop_deferring_access_checks ();

  /* If the DECLARATOR was erroneous, there's no need to go
     further.  */
  if (declarator == cp_error_declarator)
    return error_mark_node;

  /* Check that the number of template-parameter-lists is OK.  */
  if (!cp_parser_check_declarator_template_parameters (parser, declarator,
						       token->location))
    return error_mark_node;

  if (declares_class_or_enum & 2)
    cp_parser_check_for_definition_in_return_type (declarator,
						   decl_specifiers->type,
						   decl_specifiers->type_location);

  /* Figure out what scope the entity declared by the DECLARATOR is
     located in.  `grokdeclarator' sometimes changes the scope, so
     we compute it now.  */
  scope = get_scope_of_declarator (declarator);

  /* Perform any lookups in the declared type which were thought to be
     dependent, but are not in the scope of the declarator.  */
  decl_specifiers->type
    = maybe_update_decl_type (decl_specifiers->type, scope);

  /* If we're allowing GNU extensions, look for an
     asm-specification and attributes.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      /* Look for an asm-specification.  */
      asm_spec_start_token = cp_lexer_peek_token (parser->lexer);
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* And attributes.  */
      attributes_start_token = cp_lexer_peek_token (parser->lexer);
      attributes = cp_parser_attributes_opt (parser);
    }
  else
    {
      asm_specification = NULL_TREE;
      attributes = NULL_TREE;
    }

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check to see if the token indicates the start of a
     function-definition.  */
  if (function_declarator_p (declarator)
      && cp_parser_token_starts_function_definition_p (token))
    {
      if (!function_definition_allowed_p)
	{
	  /* If a function-definition should not appear here, issue an
	     error message.  */
	  cp_parser_error (parser,
			   "a function-definition is not allowed here");
	  return error_mark_node;
	}
      else
	{
	  location_t func_brace_location
	    = cp_lexer_peek_token (parser->lexer)->location;

	  /* Neither attributes nor an asm-specification are allowed
	     on a function-definition.  */
	  if (asm_specification)
	    error_at (asm_spec_start_token->location,
		      "an asm-specification is not allowed "
		      "on a function-definition");
	  if (attributes)
	    error_at (attributes_start_token->location,
		      "attributes are not allowed on a function-definition");
	  /* This is a function-definition.  */
	  *function_definition_p = true;

	  /* Parse the function definition.  */
	  if (member_p)
	    decl = cp_parser_save_member_function_body (parser,
							decl_specifiers,
							declarator,
							prefix_attributes);
	  else
	    decl
	      = (cp_parser_function_definition_from_specifiers_and_declarator
		 (parser, decl_specifiers, prefix_attributes, declarator));

	  if (decl != error_mark_node && DECL_STRUCT_FUNCTION (decl))
	    {
	      /* This is where the prologue starts...  */
	      DECL_STRUCT_FUNCTION (decl)->function_start_locus
		= func_brace_location;
	    }

	  return decl;
	}
    }

  /* [dcl.dcl]

     Only in function declarations for constructors, destructors, and
     type conversions can the decl-specifier-seq be omitted.

     We explicitly postpone this check past the point where we handle
     function-definitions because we tolerate function-definitions
     that are missing their return types in some modes.  */
  if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0)
    {
      cp_parser_error (parser,
		       "expected constructor, destructor, or type conversion");
      return error_mark_node;
    }

  /* An `=' or an `(', or an '{' in C++0x, indicates an initializer.  */
  if (token->type == CPP_EQ
      || token->type == CPP_OPEN_PAREN
      || token->type == CPP_OPEN_BRACE)
    {
      is_initialized = SD_INITIALIZED;
      initialization_kind = token->type;
      if (maybe_range_for_decl)
	*maybe_range_for_decl = error_mark_node;

      /* `= default' and `= delete' on a function declarator are
	 defaulted/deleted definitions, not initializers.  */
      if (token->type == CPP_EQ
	  && function_declarator_p (declarator))
	{
	  cp_token *t2 = cp_lexer_peek_nth_token (parser->lexer, 2);
	  if (t2->keyword == RID_DEFAULT)
	    is_initialized = SD_DEFAULTED;
	  else if (t2->keyword == RID_DELETE)
	    is_initialized = SD_DELETED;
	}
    }
  else
    {
      /* If the init-declarator isn't initialized and isn't followed by a
	 `,' or `;', it's not a valid init-declarator.  */
      if (token->type != CPP_COMMA
	  && token->type != CPP_SEMICOLON)
	{
	  if (maybe_range_for_decl && *maybe_range_for_decl != error_mark_node)
	    range_for_decl_p = true;
	  else
	    {
	      cp_parser_error (parser, "expected initializer");
	      return error_mark_node;
	    }
	}
      is_initialized = SD_UNINITIALIZED;
      initialization_kind = CPP_EOF;
    }

  /* Because start_decl has side-effects, we should only call it if we
     know we're going ahead.  By this point, we know that we cannot
     possibly be looking at any other construct.  */
  cp_parser_commit_to_tentative_parse (parser);

  /* If the decl specifiers were bad, issue an error now that we're
     sure this was intended to be a declarator.  Then continue
     declaring the variable(s), as int, to try to cut down on further
     errors.  */
  if (decl_specifiers->any_specifiers_p
      && decl_specifiers->type == error_mark_node)
    {
      cp_parser_error (parser, "invalid type in declaration");
      decl_specifiers->type = integer_type_node;
    }

  /* Check to see whether or not this declaration is a friend.  */
  friend_p = cp_parser_friend_p (decl_specifiers);

  /* Enter the newly declared entry in the symbol table.  If we're
     processing a declaration in a class-specifier, we wait until
     after processing the initializer.  */
  if (!member_p)
    {
      if (parser->in_unbraced_linkage_specification_p)
	decl_specifiers->storage_class = sc_extern;
      decl = start_decl (declarator, decl_specifiers,
			 range_for_decl_p? SD_INITIALIZED : is_initialized,
			 attributes, prefix_attributes,
			 &pushed_scope);
      /* Adjust location of decl if declarator->id_loc is more appropriate:
	 set, and decl wasn't merged with another decl, in which case its
	 location would be different from input_location, and more accurate.  */
      if (DECL_P (decl)
	  && declarator->id_loc != UNKNOWN_LOCATION
	  && DECL_SOURCE_LOCATION (decl) == input_location)
	DECL_SOURCE_LOCATION (decl) = declarator->id_loc;
    }
  else if (scope)
    /* Enter the SCOPE.  That way unqualified names appearing in the
       initializer will be looked up in SCOPE.  */
    pushed_scope = push_scope (scope);

  /* Perform deferred access control checks, now that we know in which
     SCOPE the declared entity resides.  */
  if (!member_p && decl)
    {
      tree saved_current_function_decl = NULL_TREE;

      /* If the entity being declared is a function, pretend that we
	 are in its scope.  If it is a `friend', it may have access to
	 things that would not otherwise be accessible.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  saved_current_function_decl = current_function_decl;
	  current_function_decl = decl;
	}

      /* Perform access checks for template parameters.  */
      cp_parser_perform_template_parameter_access_checks (checks);

      /* Perform the access control checks for the declarator and the
	 decl-specifiers.  */
      perform_deferred_access_checks ();

      /* Restore the saved value.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	current_function_decl = saved_current_function_decl;
    }

  /* Parse the initializer.  */
  initializer = NULL_TREE;
  is_direct_init = false;
  is_non_constant_init = true;
  if (is_initialized)
    {
      if (function_declarator_p (declarator))
	{
	  cp_token *initializer_start_token = cp_lexer_peek_token (parser->lexer);
	   if (initialization_kind == CPP_EQ)
	     initializer = cp_parser_pure_specifier (parser);
	   else
	     {
	       /* If the declaration was erroneous, we don't really
		  know what the user intended, so just silently
		  consume the initializer.  */
	       if (decl != error_mark_node)
		 error_at (initializer_start_token->location,
			   "initializer provided for function");
	       cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/false,
						      /*consume_paren=*/true);
	     }
	}
      else
	{
	  /* We want to record the extra mangling scope for in-class
	     initializers of class members and initializers of static data
	     member templates.  The former involves deferring
	     parsing of the initializer until end of class as with default
	     arguments.  So right here we only handle the latter.  */
	  if (!member_p && processing_template_decl)
	    start_lambda_scope (decl);
	  initializer = cp_parser_initializer (parser,
					       &is_direct_init,
					       &is_non_constant_init);
	  if (!member_p && processing_template_decl)
	    finish_lambda_scope ();
	}
    }

  /* The old parser allows attributes to appear after a parenthesized
     initializer.  Mark Mitchell proposed removing this functionality
     on the GCC mailing lists on 2002-08-13.  This parser accepts the
     attributes -- but ignores them.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && initialization_kind == CPP_OPEN_PAREN)
    if (cp_parser_attributes_opt (parser))
      warning (OPT_Wattributes,
	       "attributes after parenthesized initializer ignored");

  /* For an in-class declaration, use `grokfield' to create the
     declaration.  */
  if (member_p)
    {
      if (pushed_scope)
	{
	  pop_scope (pushed_scope);
	  pushed_scope = NULL_TREE;
	}
      decl = grokfield (declarator, decl_specifiers,
			initializer, !is_non_constant_init,
			/*asmspec=*/NULL_TREE,
			prefix_attributes);
      if (decl && TREE_CODE (decl) == FUNCTION_DECL)
	cp_parser_save_default_args (parser, decl);
    }

  /* Finish processing the declaration.  But, skip member
     declarations.  */
  if (!member_p && decl && decl != error_mark_node && !range_for_decl_p)
    {
      cp_finish_decl (decl,
		      initializer, !is_non_constant_init,
		      asm_specification,
		      /* If the initializer is in parentheses, then this is
			 a direct-initialization, which means that an
			 `explicit' constructor is OK.  Otherwise, an
			 `explicit' constructor cannot be used.  */
		      ((is_direct_init || !is_initialized)
		       ? LOOKUP_NORMAL : LOOKUP_IMPLICIT));
    }
  else if ((cxx_dialect != cxx98) && friend_p
	   && decl && TREE_CODE (decl) == FUNCTION_DECL)
    /* Core issue #226 (C++0x only): A default template-argument
       shall not be specified in a friend class template
       declaration. */
    check_default_tmpl_args (decl, current_template_parms, /*is_primary=*/1,
			     /*is_partial=*/0, /*is_friend_decl=*/1);

  if (!friend_p && pushed_scope)
    pop_scope (pushed_scope);

  return decl;
}

/* Parse a declarator.
declarator:
     direct-declarator
     ptr-operator declarator

   abstract-declarator:
     ptr-operator abstract-declarator [opt]
     direct-abstract-declarator

   GNU Extensions:

   declarator:
     attributes [opt] direct-declarator
     attributes [opt] ptr-operator declarator

   abstract-declarator:
     attributes [opt] ptr-operator abstract-declarator [opt]
     attributes [opt] direct-abstract-declarator

   If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to
   detect constructor, destructor or conversion operators.  It is set
   to -1 if the declarator is a name, and +1 if it is a
   function.  Otherwise it is set to zero.  Usually you just want to
   test for >0, but internally the negative value is used.

   (The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have
   a decl-specifier-seq unless it declares a constructor, destructor,
   or conversion.  It might seem that we could check this condition in
   semantic analysis, rather than parsing, but that makes it difficult
   to handle something like `f()'.  We want to notice that there are
   no decl-specifiers, and therefore realize that this is an
   expression, not a declaration.)

   If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff
   the declarator is a direct-declarator of the form "(...)".

   MEMBER_P is true iff this declarator is a member-declarator.  */

static cp_declarator *
cp_parser_declarator (cp_parser* parser,
		      cp_parser_declarator_kind dcl_kind,
		      int* ctor_dtor_or_conv_p,
		      bool* parenthesized_p,
		      bool member_p)
{
  cp_declarator *declarator;
  enum tree_code code;
  cp_cv_quals cv_quals;
  tree class_type;
  tree attributes = NULL_TREE;

  /* Assume this is not a constructor, destructor, or type-conversion
     operator.  */
  if (ctor_dtor_or_conv_p)
    *ctor_dtor_or_conv_p = 0;

  /* GNU extension: attributes may precede the declarator proper; they
     are attached to the resulting declarator at the end.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);

  /* Check for the ptr-operator production.  */
  cp_parser_parse_tentatively (parser);
  /* Parse the ptr-operator.  */
  code = cp_parser_ptr_operator (parser,
				 &class_type,
				 &cv_quals);
  /* If that worked, then we have a ptr-operator.  */
  if (cp_parser_parse_definitely (parser))
    {
      /* If a ptr-operator was found, then this declarator was not
	 parenthesized.  */
      if (parenthesized_p)
	*parenthesized_p = true;
      /* The dependent declarator is optional if we are parsing an
	 abstract-declarator.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	cp_parser_parse_tentatively (parser);

      /* Parse the dependent declarator.  */
      declarator = cp_parser_declarator (parser, dcl_kind,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 /*parenthesized_p=*/NULL,
					 /*member_p=*/false);

      /* If we are parsing an abstract-declarator, we must handle the
	 case where the dependent declarator is absent.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED
	  && !cp_parser_parse_definitely (parser))
	declarator = NULL;

      declarator = cp_parser_make_indirect_declarator
	(code, class_type, cv_quals, declarator);
    }
  /* Everything else is a direct-declarator.  */
  else
    {
      if (parenthesized_p)
	*parenthesized_p = cp_lexer_next_token_is (parser->lexer,
						   CPP_OPEN_PAREN);
      declarator = cp_parser_direct_declarator (parser, dcl_kind,
						ctor_dtor_or_conv_p,
						member_p);
    }

  /* Attach any leading GNU attributes, unless parsing failed.  */
  if (attributes && declarator && declarator != cp_error_declarator)
    declarator->attributes = attributes;

  return declarator;
}

/* Parse a direct-declarator or direct-abstract-declarator.

   direct-declarator:
     declarator-id
     direct-declarator ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-declarator [ constant-expression [opt] ]
     ( declarator )

   direct-abstract-declarator:
     direct-abstract-declarator [opt]
       ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-abstract-declarator [opt] [ constant-expression [opt] ]
     ( abstract-declarator )

   Returns a representation of the declarator.  DCL_KIND is
   CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a
   direct-abstract-declarator.  It is CP_PARSER_DECLARATOR_NAMED, if
   we are parsing a direct-declarator.
It is CP_PARSER_DECLARATOR_EITHER, if we can accept either - in the
   case of ambiguity we prefer an abstract declarator, as per
   [dcl.ambig.res].  CTOR_DTOR_OR_CONV_P and MEMBER_P are as for
   cp_parser_declarator.  */

static cp_declarator *
cp_parser_direct_declarator (cp_parser* parser,
			     cp_parser_declarator_kind dcl_kind,
			     int* ctor_dtor_or_conv_p,
			     bool member_p)
{
  cp_token *token;
  cp_declarator *declarator = NULL;
  tree scope = NULL_TREE;
  bool saved_default_arg_ok_p = parser->default_arg_ok_p;
  bool saved_in_declarator_p = parser->in_declarator_p;
  bool first = true;
  tree pushed_scope = NULL_TREE;

  while (true)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_OPEN_PAREN)
	{
	  /* This is either a parameter-declaration-clause, or a
	     parenthesized declarator.  When we know we are parsing a
	     named declarator, it must be a parenthesized declarator
	     if FIRST is true.  For instance, `(int)' is a
	     parameter-declaration-clause, with an omitted
	     direct-abstract-declarator.  But `((*))', is a
	     parenthesized abstract declarator.  Finally, when T is a
	     template parameter `(T)' is a
	     parameter-declaration-clause, and not a parenthesized
	     named declarator.

	     We first try and parse a parameter-declaration-clause,
	     and then try a nested declarator (if FIRST is true).

	     It is not an error for it not to be a
	     parameter-declaration-clause, even when FIRST is
	     false.  Consider,

	       int i (int);
	       int i (3);

	     The first is the declaration of a function while the
	     second is the definition of a variable, including its
	     initializer.

	     Having seen only the parenthesis, we cannot know which of
	     these two alternatives should be selected.  Even more
	     complex are examples like:

	       int i (int (a));
	       int i (int (3));

	     The former is a function-declaration; the latter is a
	     variable initialization.

	     Thus again, we try a parameter-declaration-clause, and if
	     that fails, we back out and return.  */

	  if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	    {
	      tree params;
	      unsigned saved_num_template_parameter_lists;
	      bool is_declarator = false;
	      tree t;

	      /* In a member-declarator, the only valid interpretation
		 of a parenthesis is the start of a
		 parameter-declaration-clause.  (It is invalid to
		 initialize a static data member with a parenthesized
		 initializer; only the "=" form of initialization is
		 permitted.)  */
	      if (!member_p)
		cp_parser_parse_tentatively (parser);

	      /* Consume the `('.  */
	      cp_lexer_consume_token (parser->lexer);
	      if (first)
		{
		  /* If this is going to be an abstract declarator, we're
		     in a declarator and we can't have default args.  */
		  parser->default_arg_ok_p = false;
		  parser->in_declarator_p = true;
		}

	      /* Inside the function parameter list, surrounding
		 template-parameter-lists do not apply.  */
	      saved_num_template_parameter_lists
		= parser->num_template_parameter_lists;
	      parser->num_template_parameter_lists = 0;

	      begin_scope (sk_function_parms, NULL_TREE);

	      /* Parse the parameter-declaration-clause.  */
	      params = cp_parser_parameter_declaration_clause (parser);

	      parser->num_template_parameter_lists
		= saved_num_template_parameter_lists;

	      /* Consume the `)'.  */
	      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	      /* If all went well, parse the cv-qualifier-seq and the
		 exception-specification.  */
	      if (member_p || cp_parser_parse_definitely (parser))
		{
		  cp_cv_quals cv_quals;
		  cp_virt_specifiers virt_specifiers;
		  tree exception_specification;
		  tree late_return;

		  is_declarator = true;

		  if (ctor_dtor_or_conv_p)
		    /* The declarator-id was flagged with -1; now that a
		       parameter list follows we know it is a function,
		       so turn a previous -1 into +1 (and anything else
		       into 0).  */
		    *ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0;
		  first = false;

		  /* Parse the cv-qualifier-seq.  */
		  cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
		  /* And the exception-specification.  */
		  exception_specification
		    = cp_parser_exception_specification_opt (parser);
		  /* Parse the virt-specifier-seq.  */
		  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);

		  late_return = (cp_parser_late_return_type_opt
				 (parser, member_p ? cv_quals : -1));

		  /* Create the function-declarator.  */
		  declarator = make_call_declarator (declarator,
						     params,
						     cv_quals,
						     virt_specifiers,
						     exception_specification,
						     late_return);
		  /* Any subsequent parameter lists are to do with
		     return type, so are not those of the declared
		     function.  */
		  parser->default_arg_ok_p = false;
		}

	      /* Remove the function parms from scope.  */
	      for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
		pop_binding (DECL_NAME (t), t);
	      leave_scope();

	      if (is_declarator)
		/* Repeat the main loop.  */
		continue;
	    }

	  /* If this is the first, we can try a parenthesized
	     declarator.  */
	  if (first)
	    {
	      bool saved_in_type_id_in_expr_p;

	      parser->default_arg_ok_p = saved_default_arg_ok_p;
	      parser->in_declarator_p = saved_in_declarator_p;

	      /* Consume the `('.  */
	      cp_lexer_consume_token (parser->lexer);
	      /* Parse the nested declarator.  */
	      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	      parser->in_type_id_in_expr_p = true;
	      declarator
		= cp_parser_declarator (parser, dcl_kind, ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					member_p);
	      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	      first = false;
	      /* Expect a `)'.  */
	      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
		declarator = cp_error_declarator;
	      if (declarator == cp_error_declarator)
		break;

	      goto handle_declarator;
	    }
	  /* Otherwise, we must be done.  */
	  else
	    break;
	}
      else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
	       && token->type == CPP_OPEN_SQUARE)
	{
	  /* Parse an array-declarator.  */
	  tree bounds;

	  if (ctor_dtor_or_conv_p)
	    *ctor_dtor_or_conv_p = 0;

	  first = false;
	  parser->default_arg_ok_p = false;
	  parser->in_declarator_p = true;
	  /* Consume the `['.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If the next token is `]', then there is no
	     constant-expression.  */
	  if (token->type != CPP_CLOSE_SQUARE)
	    {
	      bool non_constant_p;

	      bounds
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/true,
						 &non_constant_p);
	      if (!non_constant_p)
		/* OK */;
	      else if (error_operand_p (bounds))
		/* Already gave an error.  */;
	      else if (!parser->in_function_body
		       || current_binding_level->kind == sk_function_parms)
		{
		  /* Normally, the array bound must be an integral constant
		     expression.  However, as an extension, we allow VLAs
		     in function scopes as long as they aren't part of a
		     parameter declaration.  */
		  cp_parser_error (parser,
				   "array bound is not an integer constant");
		  bounds = error_mark_node;
		}
	      else if (processing_template_decl)
		{
		  /* Remember this wasn't a constant-expression.  */
		  bounds = build_nop (TREE_TYPE (bounds), bounds);
		  TREE_SIDE_EFFECTS (bounds) = 1;
		}
	    }
	  else
	    bounds = NULL_TREE;
	  /* Look for the closing `]'.  */
	  if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE))
	    {
	      declarator = cp_error_declarator;
	      break;
	    }

	  declarator = make_array_declarator (declarator, bounds);
	}
      else if (first && dcl_kind != CP_PARSER_DECLARATOR_ABSTRACT)
	{
	  {
	    tree qualifying_scope;
	    tree unqualified_name;
	    special_function_kind sfk;
	    bool abstract_ok;
	    bool pack_expansion_p = false;
	    cp_token *declarator_id_start_token;

	    /* Parse a declarator-id */
	    abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER);
	    if (abstract_ok)
	      {
		cp_parser_parse_tentatively (parser);

		/* If we see an ellipsis, we should be looking at a
		   parameter pack.  */
		if (token->type == CPP_ELLIPSIS)
		  {
		    /* Consume the `...' */
		    cp_lexer_consume_token (parser->lexer);

		    pack_expansion_p = true;
		  }
	      }

	    declarator_id_start_token = cp_lexer_peek_token (parser->lexer);
	    unqualified_name
	      = cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok);
	    qualifying_scope = parser->scope;
	    if (abstract_ok)
	      {
		bool okay = false;

		if (!unqualified_name && pack_expansion_p)
		  {
		    /* Check whether an error occurred.  */
		    okay = !cp_parser_error_occurred (parser);

		    /* We already consumed the ellipsis to mark a
		       parameter pack, but we have no way to report it,
		       so abort the tentative parse.  We will be exiting
		       immediately anyway.  */
		    cp_parser_abort_tentative_parse (parser);
		  }
		else
		  okay = cp_parser_parse_definitely (parser);

		if (!okay)
		  unqualified_name = error_mark_node;
		else if (unqualified_name
			 && (qualifying_scope
			     || (TREE_CODE (unqualified_name)
				 != IDENTIFIER_NODE)))
		  {
		    cp_parser_error (parser, "expected unqualified-id");
		    unqualified_name = error_mark_node;
		  }
	      }

	    if (!unqualified_name)
	      return NULL;
	    if (unqualified_name == error_mark_node)
	      {
		declarator = cp_error_declarator;
		pack_expansion_p = false;
		declarator->parameter_pack_p = false;
		break;
	      }

	    if (qualifying_scope && at_namespace_scope_p ()
		&& TREE_CODE (qualifying_scope) == TYPENAME_TYPE)
	      {
		/* In the declaration of a member of a template class
		   outside of the class itself, the SCOPE will sometimes
		   be a TYPENAME_TYPE.  For example, given:

		     template <typename T>
		     int S<T>::R::i = 3;

		   the SCOPE will be a TYPENAME_TYPE for `S<T>::R'.  In
		   this context, we must resolve S<T>::R to an ordinary
		   type, rather than a typename type.

		   The reason we normally avoid resolving TYPENAME_TYPEs
		   is that a specialization of `S' might render
		   `S<T>::R' not a type.  However, if `S' is
		   specialized, then this `i' will not be used, so there
		   is no harm in resolving the types here.  */
		tree type;

		/* Resolve the TYPENAME_TYPE.  */
		type = resolve_typename_type (qualifying_scope,
					      /*only_current_p=*/false);
		/* If that failed, the declarator is invalid.  */
		if (TREE_CODE (type) == TYPENAME_TYPE)
		  {
		    if (typedef_variant_p (type))
		      error_at (declarator_id_start_token->location,
				"cannot define member of dependent typedef "
				"%qT", type);
		    else
		      error_at (declarator_id_start_token->location,
				"%<%T::%E%> is not a type",
				TYPE_CONTEXT (qualifying_scope),
				TYPE_IDENTIFIER (qualifying_scope));
		  }
		qualifying_scope = type;
	      }

	    sfk = sfk_none;

	    if (unqualified_name)
	      {
		tree class_type;

		if (qualifying_scope
		    && CLASS_TYPE_P (qualifying_scope))
		  class_type = qualifying_scope;
		else
		  class_type = current_class_type;

		if (TREE_CODE (unqualified_name) == TYPE_DECL)
		  {
		    tree name_type = TREE_TYPE (unqualified_name);
		    if (class_type && same_type_p (name_type, class_type))
		      {
			if (qualifying_scope
			    && CLASSTYPE_USE_TEMPLATE (name_type))
			  {
			    error_at (declarator_id_start_token->location,
				      "invalid use of constructor as a template");
			    inform (declarator_id_start_token->location,
				    "use %<%T::%D%> instead of %<%T::%D%> to "
				    "name the constructor in a qualified name",
				    class_type,
				    DECL_NAME (TYPE_TI_TEMPLATE (class_type)),
				    class_type, name_type);
			    declarator = cp_error_declarator;
			    break;
			  }
			else
			  unqualified_name = constructor_name (class_type);
		      }
		    else
		      {
			/* We do not attempt to print the declarator
			   here because we do not have enough
			   information about its original syntactic
			   form.  */
			cp_parser_error (parser, "invalid declarator");
			declarator = cp_error_declarator;
			break;
		      }
		  }

		if (class_type)
		  {
		    if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR)
		      sfk = sfk_destructor;
		    else if (IDENTIFIER_TYPENAME_P (unqualified_name))
		      sfk = sfk_conversion;
		    else if (/* There's no way to declare a constructor
				for an anonymous type, even if the type
				got a name for linkage purposes.  */
			     !TYPE_WAS_ANONYMOUS (class_type)
			     && constructor_name_p (unqualified_name,
						    class_type))
		      {
			unqualified_name = constructor_name (class_type);
			sfk = sfk_constructor;
		      }
		    else if (is_overloaded_fn (unqualified_name)
			     && DECL_CONSTRUCTOR_P (get_first_fn
						    (unqualified_name)))
		      sfk = sfk_constructor;

		    if (ctor_dtor_or_conv_p && sfk != sfk_none)
		      *ctor_dtor_or_conv_p = -1;
		  }
	      }
	    declarator = make_id_declarator (qualifying_scope,
					     unqualified_name,
					     sfk);
	    declarator->id_loc = token->location;
	    declarator->parameter_pack_p = pack_expansion_p;

	    if (pack_expansion_p)
	      maybe_warn_variadic_templates ();
	  }

	handle_declarator:;
	  scope = get_scope_of_declarator (declarator);
	  if (scope)
	    /* Any names that appear after the declarator-id for a
	       member are looked up in the containing scope.  */
	    pushed_scope = push_scope (scope);
	  parser->in_declarator_p = true;
	  if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p)
	      || (declarator && declarator->kind == cdk_id))
	    /* Default args are only allowed on function
	       declarations.  */
	    parser->default_arg_ok_p = saved_default_arg_ok_p;
	  else
	    parser->default_arg_ok_p = false;

	  first = false;
	}
      /* We're done.  */
      else
	break;
    }

  /* For an abstract declarator, we might wind up with nothing at this
     point.  That's an error; the declarator is not optional.  */
  if (!declarator)
    cp_parser_error (parser, "expected declarator");

  /* If we entered a scope, we must exit it now.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  parser->default_arg_ok_p = saved_default_arg_ok_p;
  parser->in_declarator_p = saved_in_declarator_p;

  return declarator;
}

/* Parse a ptr-operator.

   ptr-operator:
     * cv-qualifier-seq [opt]
     &
     :: [opt] nested-name-specifier * cv-qualifier-seq [opt]

   GNU Extension:

   ptr-operator:
     & cv-qualifier-seq [opt]

   Returns INDIRECT_REF if a pointer, or pointer-to-member, was used.
   Returns ADDR_EXPR if a reference was used, or NON_LVALUE_EXPR for
   an rvalue reference.  In the case of a pointer-to-member, *TYPE is
   filled in with the TYPE containing the member.
*CV_QUALS is filled in with the cv-qualifier-seq, or
   TYPE_UNQUALIFIED, if there are no cv-qualifiers.  Returns
   ERROR_MARK if an error occurred.

   Note that the tree codes returned by this function have nothing
   to do with the types of trees that will be eventually be created
   to represent the pointer or reference type being parsed.  They are
   just constants with suggestive names.  */

static enum tree_code
cp_parser_ptr_operator (cp_parser* parser,
			tree* type,
			cp_cv_quals *cv_quals)
{
  enum tree_code code = ERROR_MARK;
  cp_token *token;

  /* Assume that it's not a pointer-to-member.  */
  *type = NULL_TREE;
  /* And that there are no cv-qualifiers.  */
  *cv_quals = TYPE_UNQUALIFIED;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If it's a `*', `&' or `&&' we have a pointer or reference.  */
  if (token->type == CPP_MULT)
    code = INDIRECT_REF;
  else if (token->type == CPP_AND)
    code = ADDR_EXPR;
  else if ((cxx_dialect != cxx98) &&
	   token->type == CPP_AND_AND) /* C++0x only */
    code = NON_LVALUE_EXPR;

  if (code != ERROR_MARK)
    {
      /* Consume the `*', `&' or `&&'.  */
      cp_lexer_consume_token (parser->lexer);

      /* A `*' can be followed by a cv-qualifier-seq, and so can a
	 `&', if we are allowing GNU extensions.  (The only qualifier
	 that can legally appear after `&' is `restrict', but that is
	 enforced during semantic analysis.)  */
      if (code == INDIRECT_REF
	  || cp_parser_allow_gnu_extensions_p (parser))
	*cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
    }
  else
    {
      /* Try the pointer-to-member case.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the optional `::' operator.  */
      cp_parser_global_scope_opt (parser,
				  /*current_scope_valid_p=*/false);
      /* Look for the nested-name specifier.  */
      token = cp_lexer_peek_token (parser->lexer);
      cp_parser_nested_name_specifier (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/false);
      /* If we found it, and the next token is a `*', then we are
	 indeed looking at a pointer-to-member operator.  */
      if (!cp_parser_error_occurred (parser)
	  && cp_parser_require (parser, CPP_MULT, RT_MULT))
	{
	  /* Indicate that the `*' operator was used.  */
	  code = INDIRECT_REF;

	  if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
	    error_at (token->location, "%qD is a namespace", parser->scope);
	  else if (TREE_CODE (parser->scope) == ENUMERAL_TYPE)
	    error_at (token->location, "cannot form pointer to member of "
		      "non-class %q#T", parser->scope);
	  else
	    {
	      /* The type of which the member is a member is given by the
		 current SCOPE.  */
	      *type = parser->scope;
	      /* The next name will not be qualified.  */
	      parser->scope = NULL_TREE;
	      parser->qualifying_scope = NULL_TREE;
	      parser->object_scope = NULL_TREE;
	      /* Look for the optional cv-qualifier-seq.  */
	      *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
	    }
	}
      /* If that didn't work we don't have a ptr-operator.  */
      if (!cp_parser_parse_definitely (parser))
	cp_parser_error (parser, "expected ptr-operator");
    }

  return code;
}

/* Parse an (optional) cv-qualifier-seq.

   cv-qualifier-seq:
     cv-qualifier cv-qualifier-seq [opt]

   cv-qualifier:
     const
     volatile

   GNU Extension:

   cv-qualifier:
     __restrict__

   Returns a bitmask representing the cv-qualifiers.  */

static cp_cv_quals
cp_parser_cv_qualifier_seq_opt (cp_parser* parser)
{
  cp_cv_quals cv_quals = TYPE_UNQUALIFIED;

  while (true)
    {
      cp_token *token;
      cp_cv_quals cv_qualifier;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's a cv-qualifier.  */
      switch (token->keyword)
	{
	case RID_CONST:
	  cv_qualifier = TYPE_QUAL_CONST;
	  break;

	case RID_VOLATILE:
	  cv_qualifier = TYPE_QUAL_VOLATILE;
	  break;

	case RID_RESTRICT:
	  cv_qualifier = TYPE_QUAL_RESTRICT;
	  break;

	default:
	  cv_qualifier = TYPE_UNQUALIFIED;
	  break;
	}

      if (!cv_qualifier)
	break;

      /* A duplicate qualifier is diagnosed and then discarded from
	 the token stream so parsing can continue.  */
      if (cv_quals & cv_qualifier)
	{
	  error_at (token->location, "duplicate cv-qualifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  cv_quals |= cv_qualifier;
	}
    }

  return cv_quals;
}

/* Parse an (optional) virt-specifier-seq.

   virt-specifier-seq:
     virt-specifier virt-specifier-seq [opt]

   virt-specifier:
     override
     final

   Returns a bitmask representing the virt-specifiers.  */

static cp_virt_specifiers
cp_parser_virt_specifier_seq_opt (cp_parser* parser)
{
  cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED;

  while (true)
    {
      cp_token *token;
      cp_virt_specifiers virt_specifier;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's a virt-specifier-qualifier.  `override' and
	 `final' are not keywords; they are plain identifiers with
	 special meaning only in this position.  */
      if (token->type != CPP_NAME)
	break;
      if (!strcmp (IDENTIFIER_POINTER(token->u.value), "override"))
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  virt_specifier = VIRT_SPEC_OVERRIDE;
	}
      else if (!strcmp (IDENTIFIER_POINTER(token->u.value), "final"))
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  virt_specifier = VIRT_SPEC_FINAL;
	}
      else if (!strcmp (IDENTIFIER_POINTER(token->u.value), "__final"))
	{
	  virt_specifier = VIRT_SPEC_FINAL;
	}
      else
	break;

      if (virt_specifiers & virt_specifier)
	{
	  error_at (token->location, "duplicate virt-specifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  virt_specifiers |= virt_specifier;
	}
    }
  return virt_specifiers;
}

/* Used by handling of trailing-return-types and NSDMI, in which 'this'
   is in scope even though it isn't real.  */

static void
inject_this_parameter (tree ctype, cp_cv_quals quals)
{
  tree this_parm;

  if (current_class_ptr)
    {
      /* We don't clear this between NSDMIs.  Is it already what we want?  */
      tree type = TREE_TYPE (TREE_TYPE (current_class_ptr));
      if (same_type_ignoring_top_level_qualifiers_p (ctype, type)
	  && cp_type_quals (type) == quals)
	return;
    }

  this_parm = build_this_parm (ctype, quals);
  /* Clear this first to avoid shortcut in cp_build_indirect_ref.  */
  current_class_ptr = NULL_TREE;
  current_class_ref
    = cp_build_indirect_ref (this_parm, RO_NULL, tf_warning_or_error);
  current_class_ptr = this_parm;
}

/* Parse a late-specified return type, if any.  This is not a separate
   non-terminal, but part of a function declarator, which looks like

     -> trailing-type-specifier-seq abstract-declarator(opt)

   Returns the type indicated by the type-id.

   QUALS is either a bitmask of cv_qualifiers or -1 for a non-member
   function.  */

static tree
cp_parser_late_return_type_opt (cp_parser* parser, cp_cv_quals quals)
{
  cp_token *token;
  tree type;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* A late-specified return type is indicated by an initial '->'.  */
  if (token->type != CPP_DEREF)
    return NULL_TREE;

  /* Consume the ->.  */
  cp_lexer_consume_token (parser->lexer);

  tree save_ccp = current_class_ptr;
  tree save_ccr = current_class_ref;
  if (quals >= 0)
    {
      /* DR 1207: 'this' is in scope in the trailing return type.  */
      inject_this_parameter (current_class_type, quals);
    }

  type = cp_parser_trailing_type_id (parser);

  if (quals >= 0)
    {
      /* Restore the `this' state saved above.  */
      current_class_ptr = save_ccp;
      current_class_ref = save_ccr;
    }

  return type;
}

/* Parse a declarator-id.

   declarator-id:
     id-expression
     :: [opt] nested-name-specifier [opt] type-name

   In the `id-expression' case, the value returned is as for
   cp_parser_id_expression if the id-expression was an unqualified-id.
   If the id-expression was a qualified-id, then a SCOPE_REF is
   returned.  The first operand is the scope (either a NAMESPACE_DECL
   or TREE_TYPE), but the second is still just a representation of an
   unqualified-id.
*/

static tree
cp_parser_declarator_id (cp_parser* parser, bool optional_p)
{
  tree id;

  /* The expression must be an id-expression.  Assume that qualified
     names are the names of types so that:

       template <class T>
       int S<T>::R::i = 3;

     will work; we must treat `S<T>::R' as the name of a type.
     Similarly, assume that qualified names are templates, where
     required, so that:

       template <class T>
       int S<T>::R<T>::i = 3;

     will work, too.  */
  id = cp_parser_id_expression (parser,
				/*template_keyword_p=*/false,
				/*check_dependency_p=*/false,
				/*template_p=*/NULL,
				/*declarator_p=*/true,
				optional_p);
  if (id && BASELINK_P (id))
    id = BASELINK_FUNCTIONS (id);
  return id;
}

/* Parse a type-id.

   type-id:
     type-specifier-seq abstract-declarator [opt]

   Returns the TYPE specified.  */

static tree
cp_parser_type_id_1 (cp_parser* parser, bool is_template_arg,
		     bool is_trailing_return)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *abstract_declarator;

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				is_trailing_return,
				&type_specifier_seq);
  if (type_specifier_seq.type == error_mark_node)
    return error_mark_node;

  /* There might or might not be an abstract declarator.  */
  cp_parser_parse_tentatively (parser);
  /* Look for the declarator.  */
  abstract_declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL,
			    /*parenthesized_p=*/NULL,
			    /*member_p=*/false);
  /* Check to see if there really was a declarator.  */
  if (!cp_parser_parse_definitely (parser))
    abstract_declarator = NULL;

  if (type_specifier_seq.type
      && type_uses_auto (type_specifier_seq.type))
    {
      /* A type-id with type 'auto' is only ok if the abstract declarator
	 is a function declarator with a late-specified return type.  */
      if (abstract_declarator
	  && abstract_declarator->kind == cdk_function
	  && abstract_declarator->u.function.late_return_type)
	/* OK */;
      else
	{
	  error ("invalid use of %<auto%>");
	  return error_mark_node;
	}
    }

  return groktypename (&type_specifier_seq, abstract_declarator,
		       is_template_arg);
}

/* Parse a type-id; wrapper for cp_parser_type_id_1 in the ordinary
   (non-template-argument, non-trailing-return) context.  */

static tree cp_parser_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, false, false);
}

/* Parse a type-id used as a template type argument; forbids type
   definitions while doing so.  */

static tree cp_parser_template_type_arg (cp_parser *parser)
{
  tree r;
  const char *saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in template arguments");
  r = cp_parser_type_id_1 (parser, true, false);
  parser->type_definition_forbidden_message = saved_message;
  return r;
}

/* Parse a type-id appearing in a trailing-return-type.  */

static tree cp_parser_trailing_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, false, true);
}

/* Parse a type-specifier-seq.

   type-specifier-seq:
     type-specifier type-specifier-seq [opt]

   GNU extension:

   type-specifier-seq:
     attributes type-specifier-seq [opt]

   If IS_DECLARATION is true, we are at the start of a "condition" or
   exception-declaration, so we might be followed by a declarator-id.

   If IS_TRAILING_RETURN is true, we are in a trailing-return-type,
   i.e. we've just seen "->".

   Sets *TYPE_SPECIFIER_SEQ to represent the sequence.  */

static void
cp_parser_type_specifier_seq (cp_parser* parser,
			      bool is_declaration,
			      bool is_trailing_return,
			      cp_decl_specifier_seq *type_specifier_seq)
{
  bool seen_type_specifier = false;
  cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL;
  cp_token *start_token = NULL;

  /* Clear the TYPE_SPECIFIER_SEQ.  */
  clear_decl_specs (type_specifier_seq);

  /* In the context of a trailing return type, enum E { } is an
     elaborated-type-specifier followed by a function-body, not an
     enum-specifier.  */
  if (is_trailing_return)
    flags |= CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS;

  /* Parse the type-specifiers and attributes.  */
  while (true)
    {
      tree type_specifier;
      bool is_cv_qualifier;

      /* Check for attributes first.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
	{
	  type_specifier_seq->attributes =
	    chainon (type_specifier_seq->attributes,
		     cp_parser_attributes_opt (parser));
	  continue;
	}

      /* Record the token of the beginning of the type specifier seq,
	 for error reporting purposes.  */
      if (!start_token)
	start_token = cp_lexer_peek_token (parser->lexer);

      /* Look for the type-specifier.  */
      type_specifier = cp_parser_type_specifier (parser,
						 flags,
						 type_specifier_seq,
						 /*is_declaration=*/false,
						 NULL,
						 &is_cv_qualifier);
      if (!type_specifier)
	{
	  /* If the first type-specifier could not be found, this is not a
	     type-specifier-seq at all.  */
	  if (!seen_type_specifier)
	    {
	      cp_parser_error (parser, "expected type-specifier");
	      type_specifier_seq->type = error_mark_node;
	      return;
	    }
	  /* If subsequent type-specifiers could not be found, the
	     type-specifier-seq is complete.  */
	  break;
	}

      seen_type_specifier = true;
      /* The standard says that a condition can be:

	    type-specifier-seq declarator = assignment-expression

	 However, given:

	   struct S {};
	   if (int S = ...)

	 we should treat the "S" as a declarator, not as a
	 type-specifier.  The standard doesn't say that explicitly for
	 type-specifier-seq, but it does say that for
	 decl-specifier-seq in an ordinary declaration.  Perhaps it
	 would be clearer just to allow a decl-specifier-seq here, and
	 then add a semantic restriction that if any decl-specifiers
	 that are not type-specifiers appear, the program is
	 invalid.  */
      if (is_declaration && !is_cv_qualifier)
	flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
    }

  cp_parser_check_decl_spec (type_specifier_seq, start_token->location);
}

/* Parse a parameter-declaration-clause.

   parameter-declaration-clause:
     parameter-declaration-list [opt] ... [opt]
     parameter-declaration-list , ...

   Returns a representation for the parameter declarations.  A return
   value of NULL indicates a parameter-declaration-clause consisting
   only of an ellipsis.
*/

static tree
cp_parser_parameter_declaration_clause (cp_parser* parser)
{
  tree parameters;
  cp_token *token;
  bool ellipsis_p;
  bool is_error;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check for trivial parameter-declaration-clauses.  */
  if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }
  else if (token->type == CPP_CLOSE_PAREN)
    /* There are no parameters.  */
    {
#ifndef NO_IMPLICIT_EXTERN_C
      /* In a system header inside `extern "C"', an empty parameter
	 list is treated as unspecified (as in C89), not as `(void)'.  */
      if (in_system_header && current_class_type == NULL
	  && current_lang_name == lang_name_c)
	return NULL_TREE;
      else
#endif
	return void_list_node;
    }
  /* Check for `(void)', too, which is a special case.  */
  else if (token->keyword == RID_VOID
	   && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
	       == CPP_CLOSE_PAREN))
    {
      /* Consume the `void' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* There are no parameters.  */
      return void_list_node;
    }

  /* Parse the parameter-declaration-list.  */
  parameters = cp_parser_parameter_declaration_list (parser, &is_error);
  /* If a parse error occurred while parsing the
     parameter-declaration-list, then the entire
     parameter-declaration-clause is erroneous.  */
  if (is_error)
    return NULL;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `,', the clause should terminate with an ellipsis.  */
  if (token->type == CPP_COMMA)
    {
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* Expect an ellipsis.  */
      ellipsis_p
	= (cp_parser_require (parser, CPP_ELLIPSIS, RT_ELLIPSIS) != NULL);
    }
  /* It might also be `...' if the optional trailing `,' was
     omitted.  */
  else if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* And remember that we saw it.  */
      ellipsis_p = true;
    }
  else
    ellipsis_p = false;

  /* Finish the parameter list.  A non-variadic list is terminated
     with `void_list_node'.  */
  if (!ellipsis_p)
    parameters = chainon (parameters, void_list_node);

  return parameters;
}

/* Parse a parameter-declaration-list.
parameter-declaration-list:
     parameter-declaration
     parameter-declaration-list , parameter-declaration

   Returns a representation of the parameter-declaration-list, as for
   cp_parser_parameter_declaration_clause.  However, the
   `void_list_node' is never appended to the list.  Upon return,
   *IS_ERROR will be true iff an error occurred.  */

static tree
cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error)
{
  tree parameters = NULL_TREE;
  tree *tail = &parameters;
  bool saved_in_unbraced_linkage_specification_p;
  int index = 0;

  /* Assume all will go well.  */
  *is_error = false;
  /* The special considerations that apply to a function within an
     unbraced linkage specifications do not apply to the parameters
     to the function.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Look for more parameters.  */
  while (true)
    {
      cp_parameter_declarator *parameter;
      tree decl = error_mark_node;
      bool parenthesized_p = false;
      /* Parse the parameter.  */
      parameter
	= cp_parser_parameter_declaration (parser,
					   /*template_parm_p=*/false,
					   &parenthesized_p);

      /* We don't know yet if the enclosing context is deprecated, so wait
	 and warn in grokparms if appropriate.  */
      deprecated_state = DEPRECATED_SUPPRESS;

      if (parameter)
	decl = grokdeclarator (parameter->declarator,
			       &parameter->decl_specifiers,
			       PARM,
			       parameter->default_argument != NULL_TREE,
			       &parameter->decl_specifiers.attributes);

      deprecated_state = DEPRECATED_NORMAL;

      /* If a parse error occurred parsing the parameter declaration,
	 then the entire parameter-declaration-list is erroneous.  */
      if (decl == error_mark_node)
	{
	  *is_error = true;
	  parameters = error_mark_node;
	  break;
	}

      if (parameter->decl_specifiers.attributes)
	cplus_decl_attributes (&decl,
			       parameter->decl_specifiers.attributes,
			       0);
      if (DECL_NAME (decl))
	decl = pushdecl (decl);

      if (decl != error_mark_node)
	{
	  retrofit_lang_decl (decl);
	  DECL_PARM_INDEX (decl) = ++index;
	  DECL_PARM_LEVEL (decl) = function_parm_depth ();
	}

      /* Add the new parameter to the list.  */
      *tail = build_tree_list (parameter->default_argument, decl);
      tail = &TREE_CHAIN (*tail);

      /* Peek at the next token.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
	  || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
	  /* These are for Objective-C++ */
	  || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	/* The parameter-declaration-list is complete.  */
	break;
      else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	{
	  cp_token *token;

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);
	  /* If it's an ellipsis, then the list is complete.  */
	  if (token->type == CPP_ELLIPSIS)
	    break;
	  /* Otherwise, there must be more parameters.  Consume the
	     `,'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* When parsing something like:

		int i(float f, double d)

	     we can tell after seeing the declaration for "f" that we
	     are not looking at an initialization of a variable "i",
	     but rather at the declaration of a function "i".

	     Due to the fact that the parsing of template arguments
	     (as specified to a template-id) requires backtracking we
	     cannot use this technique when inside a template argument
	     list.  */
	  if (!parser->in_template_argument_list_p
	      && !parser->in_type_id_in_expr_p
	      && cp_parser_uncommitted_to_tentative_parse_p (parser)
	      /* However, a parameter-declaration of the form
		 "float(f)" (which is a valid declaration of a
		 parameter "f") can also be interpreted as an
		 expression (the conversion of "f" to "float").  */
	      && !parenthesized_p)
	    cp_parser_commit_to_tentative_parse (parser);
	}
      else
	{
	  cp_parser_error (parser, "expected %<,%> or %<...%>");
	  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_skip_to_closing_parenthesis (parser,
						   /*recovering=*/true,
						   /*or_comma=*/false,
						   /*consume_paren=*/false);
	  break;
	}
    }

  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return parameters;
}

/* Parse a parameter declaration.

   parameter-declaration:
     decl-specifier-seq ... [opt] declarator
     decl-specifier-seq declarator = assignment-expression
     decl-specifier-seq ... [opt] abstract-declarator [opt]
     decl-specifier-seq abstract-declarator [opt] = assignment-expression

   If TEMPLATE_PARM_P is TRUE, then this parameter-declaration
   declares a template parameter.  (In that case, a non-nested `>'
   token encountered during the parsing of the assignment-expression
   is not interpreted as a greater-than operator.)

   Returns a representation of the parameter, or NULL if an error
   occurs.  If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to
   true iff the declarator is of the form "(p)".  */

static cp_parameter_declarator *
cp_parser_parameter_declaration (cp_parser *parser,
				 bool template_parm_p,
				 bool *parenthesized_p)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  cp_declarator *declarator;
  tree default_argument;
  cp_token *token = NULL, *declarator_token_start = NULL;
  const char *saved_message;

  /* In a template parameter, `>' is not an operator.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */

  /* Type definitions may not appear in parameter types.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
   = G_("types may not be defined in parameter types");

  /* Parse the declaration-specifiers.
*/ cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_NONE, &decl_specifiers, &declares_class_or_enum); /* Complain about missing 'typename' or other invalid type names. */ if (!decl_specifiers.any_type_specifiers_p) cp_parser_parse_and_diagnose_invalid_type_name (parser); /* If an error occurred, there's no reason to attempt to parse the rest of the declaration. */ if (cp_parser_error_occurred (parser)) { parser->type_definition_forbidden_message = saved_message; return NULL; } /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token is a `)', `,', `=', `>', or `...', then there is no declarator. However, when variadic templates are enabled, there may be a declarator following `...'. */ if (token->type == CPP_CLOSE_PAREN || token->type == CPP_COMMA || token->type == CPP_EQ || token->type == CPP_GREATER) { declarator = NULL; if (parenthesized_p) *parenthesized_p = false; } /* Otherwise, there should be a declarator. */ else { bool saved_default_arg_ok_p = parser->default_arg_ok_p; parser->default_arg_ok_p = false; /* After seeing a decl-specifier-seq, if the next token is not a "(", there is no possibility that the code is a valid expression. Therefore, if parsing tentatively, we commit at this point. */ if (!parser->in_template_argument_list_p /* In an expression context, having seen: (int((char ... we cannot be sure whether we are looking at a function-type (taking a "char" as a parameter) or a cast of some object of type "char" to "int". */ && !parser->in_type_id_in_expr_p && cp_parser_uncommitted_to_tentative_parse_p (parser) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)) cp_parser_commit_to_tentative_parse (parser); /* Parse the declarator. 
*/ declarator_token_start = token; declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER, /*ctor_dtor_or_conv_p=*/NULL, parenthesized_p, /*member_p=*/false); parser->default_arg_ok_p = saved_default_arg_ok_p; /* After the declarator, allow more attributes. */ decl_specifiers.attributes = chainon (decl_specifiers.attributes, cp_parser_attributes_opt (parser)); } /* If the next token is an ellipsis, and we have not seen a declarator name, and the type of the declarator contains parameter packs but it is not a TYPE_PACK_EXPANSION, then we actually have a parameter pack expansion expression. Otherwise, leave the ellipsis for a C-style variadic function. */ token = cp_lexer_peek_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { tree type = decl_specifiers.type; if (type && DECL_P (type)) type = TREE_TYPE (type); if (type && TREE_CODE (type) != TYPE_PACK_EXPANSION && declarator_can_be_parameter_pack (declarator) && (!declarator || !declarator->parameter_pack_p) && uses_parameter_packs (type)) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); /* Build a pack expansion type */ if (declarator) declarator->parameter_pack_p = true; else decl_specifiers.type = make_pack_expansion (type); } } /* The restriction on defining new types applies only to the type of the parameter, not to the default argument. */ parser->type_definition_forbidden_message = saved_message; /* If the next token is `=', then process a default argument. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { token = cp_lexer_peek_token (parser->lexer); /* If we are defining a class, then the tokens that make up the default argument must be saved and processed later. 
*/ if (!template_parm_p && at_class_scope_p () && TYPE_BEING_DEFINED (current_class_type) && !LAMBDA_TYPE_P (current_class_type)) default_argument = cp_parser_cache_defarg (parser, /*nsdmi=*/false); /* Outside of a class definition, we can just parse the assignment-expression. */ else default_argument = cp_parser_default_argument (parser, template_parm_p); if (!parser->default_arg_ok_p) { if (flag_permissive) warning (0, "deprecated use of default argument for parameter of non-function"); else { error_at (token->location, "default arguments are only " "permitted for function parameters"); default_argument = NULL_TREE; } } else if ((declarator && declarator->parameter_pack_p) || (decl_specifiers.type && PACK_EXPANSION_P (decl_specifiers.type))) { /* Find the name of the parameter pack. */ cp_declarator *id_declarator = declarator; while (id_declarator && id_declarator->kind != cdk_id) id_declarator = id_declarator->declarator; if (id_declarator && id_declarator->kind == cdk_id) error_at (declarator_token_start->location, template_parm_p ? G_("template parameter pack %qD " "cannot have a default argument") : G_("parameter pack %qD cannot have " "a default argument"), id_declarator->u.id.unqualified_name); else error_at (declarator_token_start->location, template_parm_p ? G_("template parameter pack cannot have " "a default argument") : G_("parameter pack cannot have a " "default argument")); default_argument = NULL_TREE; } } else default_argument = NULL_TREE; return make_parameter_declarator (&decl_specifiers, declarator, default_argument); } /* Parse a default argument and return it. TEMPLATE_PARM_P is true if this is a default argument for a non-type template parameter. 
*/

static tree
cp_parser_default_argument (cp_parser *parser, bool template_parm_p)
{
  tree default_argument = NULL_TREE;
  bool saved_greater_than_is_operator_p;
  bool saved_local_variables_forbidden_p;
  bool non_constant_p, is_direct_init;

  /* Make sure that PARSER->GREATER_THAN_IS_OPERATOR_P is
     set correctly.  */
  saved_greater_than_is_operator_p = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = !template_parm_p;
  /* Local variable names (and the `this' keyword) may not
     appear in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;
  /* Parse the assignment-expression.  */
  if (template_parm_p)
    push_deferring_access_checks (dk_no_deferred);
  default_argument
    = cp_parser_initializer (parser, &is_direct_init, &non_constant_p);
  if (BRACE_ENCLOSED_INITIALIZER_P (default_argument))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
  if (template_parm_p)
    pop_deferring_access_checks ();
  /* Restore the saved parser state.  */
  parser->greater_than_is_operator_p = saved_greater_than_is_operator_p;
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;

  return default_argument;
}

/* Parse a function-body.

   function-body:
     compound_statement  */

static void
cp_parser_function_body (cp_parser *parser)
{
  cp_parser_compound_statement (parser, NULL, false, true);
}

/* Parse a ctor-initializer-opt followed by a function-body.  Return
   true if a ctor-initializer was present.  */

static bool
cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser)
{
  tree body, list;
  bool ctor_initializer_p;
  /* Whether we must verify that a constexpr constructor body is
     empty, as required for constexpr constructors.  */
  const bool check_body_p =
     DECL_CONSTRUCTOR_P (current_function_decl)
     && DECL_DECLARED_CONSTEXPR_P (current_function_decl);
  tree last = NULL;

  /* Begin the function body.  */
  body = begin_function_body ();
  /* Parse the optional ctor-initializer.  */
  ctor_initializer_p = cp_parser_ctor_initializer_opt (parser);

  /* If we're parsing a constexpr constructor definition, we need
     to check that the constructor body is indeed empty.  However,
     before we get to cp_parser_function_body lot of junk has been
     generated, so we can't just check that we have an empty block.
     Rather we take a snapshot of the outermost block, and check whether
     cp_parser_function_body changed its state.  */
  if (check_body_p)
    {
      list = cur_stmt_list;
      if (STATEMENT_LIST_TAIL (list))
	last = STATEMENT_LIST_TAIL (list)->stmt;
    }
  /* Parse the function-body.  */
  cp_parser_function_body (parser);
  if (check_body_p)
    check_constexpr_ctor_body (last, list);
  /* Finish the function body.  */
  finish_function_body (body);

  return ctor_initializer_p;
}

/* Parse an initializer.

   initializer:
     = initializer-clause
     ( expression-list )

   Returns an expression representing the initializer.  If no
   initializer is present, NULL_TREE is returned.

   *IS_DIRECT_INIT is set to FALSE if the `= initializer-clause'
   production is used, and TRUE otherwise.  *IS_DIRECT_INIT is
   set to TRUE if there is no initializer present.  If there is an
   initializer, and it is not a constant-expression, *NON_CONSTANT_P
   is set to true; otherwise it is set to false.  */

static tree
cp_parser_initializer (cp_parser* parser, bool* is_direct_init,
		       bool* non_constant_p)
{
  cp_token *token;
  tree init;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Let our caller know whether or not this initializer was
     parenthesized.  */
  *is_direct_init = (token->type != CPP_EQ);
  /* Assume that the initializer is constant.  */
  *non_constant_p = false;

  if (token->type == CPP_EQ)
    {
      /* Consume the `='.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the initializer-clause.  */
      init = cp_parser_initializer_clause (parser, non_constant_p);
    }
  else if (token->type == CPP_OPEN_PAREN)
    {
      VEC(tree,gc) *vec;
      vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/false,
						     /*allow_expansion_p=*/true,
						     non_constant_p);
      if (vec == NULL)
	return error_mark_node;
      init = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }
  else if (token->type == CPP_OPEN_BRACE)
    {
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      init = cp_parser_braced_list (parser, non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (init) = 1;
    }
  else
    {
      /* Anything else is an error.  */
      cp_parser_error (parser, "expected initializer");
      init = error_mark_node;
    }

  return init;
}

/* Parse an initializer-clause.

   initializer-clause:
     assignment-expression
     braced-init-list

   Returns an expression representing the initializer.

   If the `assignment-expression' production is used the value
   returned is simply a representation for the expression.

   Otherwise, calls cp_parser_braced_list.  */

static tree
cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p)
{
  tree initializer;

  /* Assume the expression is constant.  */
  *non_constant_p = false;

  /* If it is not a `{', then we are looking at an
     assignment-expression.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    {
      initializer
	= cp_parser_constant_expression (parser,
					 /*allow_non_constant_p=*/true,
					 non_constant_p);
    }
  else
    initializer = cp_parser_braced_list (parser, non_constant_p);

  return initializer;
}

/* Parse a brace-enclosed initializer list.

   braced-init-list:
     { initializer-list , [opt] }
     { }

   Returns a CONSTRUCTOR.  The CONSTRUCTOR_ELTS will be
   the elements of the initializer-list (or NULL, if the last
   production is used).  The TREE_TYPE for the CONSTRUCTOR will be
   NULL_TREE.  There is no way to detect whether or not the optional
   trailing `,' was provided.  NON_CONSTANT_P is as for
   cp_parser_initializer.
*/

static tree
cp_parser_braced_list (cp_parser* parser, bool* non_constant_p)
{
  tree initializer;

  /* Consume the `{' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Create a CONSTRUCTOR to represent the braced-initializer.  */
  initializer = make_node (CONSTRUCTOR);
  /* If it's not a `}', then there is a non-trivial initializer.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
    {
      /* Parse the initializer list.  */
      CONSTRUCTOR_ELTS (initializer)
	= cp_parser_initializer_list (parser, non_constant_p);
      /* A trailing `,' token is allowed.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
    }
  /* Now, there should be a trailing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  /* Mark this CONSTRUCTOR as a braced-init-list.  */
  TREE_TYPE (initializer) = init_list_type_node;
  return initializer;
}

/* Parse an initializer-list.

   initializer-list:
     initializer-clause ... [opt]
     initializer-list , initializer-clause ... [opt]

   GNU Extension:

   initializer-list:
     designation initializer-clause ...[opt]
     initializer-list , designation initializer-clause ...[opt]

   designation:
     . identifier =
     identifier :
     [ constant-expression ] =

   Returns a VEC of constructor_elt.  The VALUE of each elt is an expression
   for the initializer.  If the INDEX of the elt is non-NULL, it is the
   IDENTIFIER_NODE naming the field to initialize.  NON_CONSTANT_P is
   as for cp_parser_initializer.  */

static VEC(constructor_elt,gc) *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
  VEC(constructor_elt,gc) *v = NULL;

  /* Assume all of the expressions are constant.  */
  *non_constant_p = false;

  /* Parse the rest of the list.  */
  while (true)
    {
      cp_token *token;
      tree designator;
      tree initializer;
      bool clause_non_constant_p;

      /* If the next token is an identifier and the following one is a
	 colon, we are looking at the GNU designated-initializer
	 syntax.  */
      if (cp_parser_allow_gnu_extensions_p (parser)
	  && cp_lexer_next_token_is (parser->lexer, CPP_NAME)
	  && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow designated initializers");
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle the C99 syntax, '. id ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && cp_lexer_next_token_is (parser->lexer, CPP_DOT)
	       && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME
	       && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_pedantic,
		   "ISO C++ does not allow C99 designated initializers");
	  /* Consume the `.'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `='.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle C99 array designators, '[ const ] ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && !c_dialect_objc ()
	       && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* In C++11, [ could start a lambda-introducer.  */
	  bool non_const = false;

	  cp_parser_parse_tentatively (parser);
	  cp_lexer_consume_token (parser->lexer);
	  designator = cp_parser_constant_expression (parser, true, &non_const);
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	  cp_parser_require (parser, CPP_EQ, RT_EQ);
	  if (!cp_parser_parse_definitely (parser))
	    designator = NULL_TREE;
	  else if (non_const)
	    require_potential_rvalue_constant_expression (designator);
	}
      else
	designator = NULL_TREE;

      /* Parse the initializer.  */
      initializer = cp_parser_initializer_clause (parser,
						  &clause_non_constant_p);
      /* If any clause is non-constant, so is the entire initializer.  */
      if (clause_non_constant_p)
	*non_constant_p = true;

      /* If we have an ellipsis, this is an initializer pack
	 expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);

	  /* Turn the initializer into an initializer expansion.  */
	  initializer = make_pack_expansion (initializer);
	}

      /* Add it to the vector.  */
      CONSTRUCTOR_APPEND_ELT (v, designator, initializer);

      /* If the next token is not a comma, we have reached the end of
	 the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;

      /* Peek at the next token.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If the next token is a `}', then we're still done.  An
	 initializer-clause can have a trailing `,' after the
	 initializer-list and before the closing `}'.  */
      if (token->type == CPP_CLOSE_BRACE)
	break;

      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return v;
}

/* Classes [gram.class] */

/* Parse a class-name.

   class-name:
     identifier
     template-id

   TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used
   to indicate that names looked up in dependent types should be
   assumed to be types.  TEMPLATE_KEYWORD_P is true iff the `template'
   keyword has been used to indicate that the name that appears next
   is a template.  TAG_TYPE indicates the explicit tag given before
   the type name, if any.  If CHECK_DEPENDENCY_P is FALSE, names are
   looked up in dependent scopes.  If CLASS_HEAD_P is TRUE, this class
   is the class being defined in a class-head.

   Returns the TYPE_DECL representing the class.  */

static tree
cp_parser_class_name (cp_parser *parser,
		      bool typename_keyword_p,
		      bool template_keyword_p,
		      enum tag_types tag_type,
		      bool check_dependency_p,
		      bool class_head_p,
		      bool is_declaration)
{
  tree decl;
  tree scope;
  bool typename_p;
  cp_token *token;
  tree identifier = NULL_TREE;

  /* All class-names start with an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME && token->type != CPP_TEMPLATE_ID)
    {
      cp_parser_error (parser, "expected class-name");
      return error_mark_node;
    }

  /* PARSER->SCOPE can be cleared when parsing the
     template-arguments to a template-id, so we save it here.  */
  scope = parser->scope;
  if (scope == error_mark_node)
    return error_mark_node;

  /* Any name names a type if we're following the `typename' keyword
     in a qualified name where the enclosing scope is type-dependent.  */
  typename_p = (typename_keyword_p && scope && TYPE_P (scope)
		&& dependent_type_p (scope));
  /* Handle the common case (an identifier, but not a template-id)
     efficiently.  */
  if (token->type == CPP_NAME
      && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))
    {
      cp_token *identifier_token;
      bool ambiguous_p;

      /* Look for the identifier.  */
      identifier_token = cp_lexer_peek_token (parser->lexer);
      ambiguous_p = identifier_token->ambiguous_p;
      identifier = cp_parser_identifier (parser);
      /* If the next token isn't an identifier, we are certainly not
	 looking at a class-name.  */
      if (identifier == error_mark_node)
	decl = error_mark_node;
      /* If we know this is a type-name, there's no need to look it
	 up.  */
      else if (typename_p)
	decl = identifier;
      else
	{
	  tree ambiguous_decls;
	  /* If we already know that this lookup is ambiguous, then
	     we've already issued an error message; there's no reason
	     to check again.  */
	  if (ambiguous_p)
	    {
	      cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	  /* If the next token is a `::', then the name must be a type
	     name.

	     [basic.lookup.qual]

	     During the lookup for a name preceding the :: scope
	     resolution operator, object, function, and enumerator
	     names are ignored.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	    tag_type = typename_type;
	  /* Look up the name.  */
	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					check_dependency_p,
					&ambiguous_decls,
					identifier_token->location);
	  if (ambiguous_decls)
	    {
	      if (cp_parser_parsing_tentatively (parser))
		cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	}
    }
  else
    {
      /* Try a template-id.  */
      decl = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    is_declaration);
      if (decl == error_mark_node)
	return error_mark_node;
    }

  decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p);

  /* If this is a typename, create a TYPENAME_TYPE.  */
  if (typename_p && decl != error_mark_node)
    {
      decl = make_typename_type (scope, decl, typename_type,
				 /*complain=*/tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }

  decl = strip_using_decl (decl);

  /* Check to see that it is really the name of a class.  */
  if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
      && TREE_CODE (TREE_OPERAND (decl, 0)) == IDENTIFIER_NODE
      && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    /* Situations like this:

	 template <typename T> struct A {
	   typename T::template X<int>::I i;
	 };

       are problematic.  Is `T::template X<int>' a class-name?  The
       standard does not seem to be definitive, but there is no other
       valid interpretation of the following `::'.  Therefore, those
       names are considered class-names.  */
    {
      decl = make_typename_type (scope, decl, tag_type, tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }
  else if (TREE_CODE (decl) != TYPE_DECL
	   || TREE_TYPE (decl) == error_mark_node
	   || !MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))
	   /* In Objective-C 2.0, a classname followed by '.' starts a
	      dot-syntax expression, and it's not a type-name.  */
	   || (c_dialect_objc ()
	       && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
	       && objc_is_class_name (decl)))
    decl = error_mark_node;

  if (decl == error_mark_node)
    cp_parser_error (parser, "expected class-name");
  else if (identifier && !parser->scope)
    maybe_note_name_used_in_class (identifier, decl);

  return decl;
}

/* Parse a class-specifier.

   class-specifier:
     class-head { member-specification [opt] }

   Returns the TREE_TYPE representing the class.  */

static tree
cp_parser_class_specifier_1 (cp_parser* parser)
{
  tree type;
  tree attributes = NULL_TREE;
  bool nested_name_specifier_p;
  unsigned saved_num_template_parameter_lists;
  bool saved_in_function_body;
  unsigned char in_statement;
  bool in_switch_statement_p;
  bool saved_in_unbraced_linkage_specification_p;
  tree old_scope = NULL_TREE;
  tree scope = NULL_TREE;
  cp_token *closing_brace;

  push_deferring_access_checks (dk_no_deferred);

  /* Parse the class-head.  */
  type = cp_parser_class_head (parser,
			       &nested_name_specifier_p);
  /* If the class-head was a semantic disaster, skip the entire body
     of the class.  */
  if (!type)
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Look for the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  cp_parser_check_type_definition (parser);
  /* Remember that we are defining one more class.  */
  ++parser->num_classes_being_defined;
  /* Inside the class, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;
  /* We are not in a function body.  */
  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = false;
  /* Or in a loop.  */
  in_statement = parser->in_statement;
  parser->in_statement = 0;
  /* Or in a switch.  */
  in_switch_statement_p = parser->in_switch_statement_p;
  parser->in_switch_statement_p = false;
  /* We are not immediately inside an extern "lang" block.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Start the class.  */
  if (nested_name_specifier_p)
    {
      scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type));
      old_scope = push_inner_scope (scope);
    }
  type = begin_class_definition (type);

  if (type == error_mark_node)
    /* If the type is erroneous, skip the entire body of the class.  */
    cp_parser_skip_to_closing_brace (parser);
  else
    /* Parse the member-specification.  */
    cp_parser_member_specification_opt (parser);

  /* Look for the trailing `}'.  */
  closing_brace = cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  /* Look for trailing attributes to apply to this class.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);
  if (type != error_mark_node)
    type = finish_struct (type, attributes);
  if (nested_name_specifier_p)
    pop_inner_scope (old_scope, scope);

  /* We've finished a type definition.  Check for the common syntax
     error of forgetting a semicolon after the definition.  We need to
     be careful, as we can't just check for not-a-semicolon and be done
     with it; the user might have typed:

     class X { } c = ...;
     class X { } *p = ...;

     and so forth.  Instead, enumerate all the possible tokens that
     might follow this production; if we don't see one of them, then
     complain and silently insert the semicolon.  */
  {
    cp_token *token = cp_lexer_peek_token (parser->lexer);
    bool want_semicolon = true;

    switch (token->type)
      {
      case CPP_NAME:
      case CPP_SEMICOLON:
      case CPP_MULT:
      case CPP_AND:
      case CPP_OPEN_PAREN:
      case CPP_CLOSE_PAREN:
      case CPP_COMMA:
	want_semicolon = false;
	break;

      /* While it's legal for type qualifiers and storage class
	 specifiers to follow type definitions in the grammar, only
	 compiler testsuites contain code like that.  Assume that if
	 we see such code, then what we're really seeing is a case
	 like:

	 class X { }
	 const <type> var = ...;

	 or

	 class Y { }
	 static <type> func (...) ...

	 i.e. the qualifier or specifier applies to the next
	 declaration.  To do so, however, we need to look ahead one
	 more token to see if *that* token is a type specifier.

	 This code could be improved to handle:

	 class Z { }
	 static const <type> var = ...;  */
      case CPP_KEYWORD:
	if (keyword_is_decl_specifier (token->keyword))
	  {
	    cp_token *lookahead = cp_lexer_peek_nth_token (parser->lexer, 2);

	    /* Handling user-defined types here would be nice, but very
	       tricky.  */
	    want_semicolon
	      = (lookahead->type == CPP_KEYWORD
		 && keyword_begins_type_specifier (lookahead->keyword));
	  }
	break;
      default:
	break;
      }

    /* If we don't have a type, then something is very wrong and we
       shouldn't try to do anything clever.  Likewise for not seeing the
       closing brace.  */
    if (closing_brace && TYPE_P (type) && want_semicolon)
      {
	cp_token_position prev
	  = cp_lexer_previous_token_position (parser->lexer);
	cp_token *prev_token = cp_lexer_token_at (parser->lexer, prev);
	location_t loc = prev_token->location;

	if (CLASSTYPE_DECLARED_CLASS (type))
	  error_at (loc, "expected %<;%> after class definition");
	else if (TREE_CODE (type) == RECORD_TYPE)
	  error_at (loc, "expected %<;%> after struct definition");
	else if (TREE_CODE (type) == UNION_TYPE)
	  error_at (loc, "expected %<;%> after union definition");
	else
	  gcc_unreachable ();

	/* Unget one token and smash it to look as though we encountered
	   a semicolon in the input stream.  */
	cp_lexer_set_token_position (parser->lexer, prev);
	token = cp_lexer_peek_token (parser->lexer);
	token->type = CPP_SEMICOLON;
	token->keyword = RID_MAX;
      }
  }

  /* If this class is not itself within the scope of another class,
     then we need to parse the bodies of all of the queued function
     definitions.  Note that the queued functions defined in a class
     are not always processed immediately following the
     class-specifier for that class.  Consider:

       struct A {
	 struct B { void f() { sizeof (A); } };
       };

     If `f' were processed before the processing of `A' were
     completed, there would be no way to compute the size of `A'.
     Note that the nesting we are interested in here is lexical --
     not the semantic nesting given by TYPE_CONTEXT.  In particular,
     for:

       struct A { struct B; };
       struct A::B { void f() { } };

     there is no need to delay the parsing of `A::B::f'.  */
  if (--parser->num_classes_being_defined == 0)
    {
      tree decl;
      tree class_type = NULL_TREE;
      tree pushed_scope = NULL_TREE;
      unsigned ix;
      cp_default_arg_entry *e;
      tree save_ccp, save_ccr;

      /* In a first pass, parse default arguments to the functions.
	 Then, in a second pass, parse the bodies of the functions.
	 This two-phased approach handles cases like:

	    struct S {
	      void f() { g(); }
	      void g(int i = 3);
	    };

	 */
      FOR_EACH_VEC_ELT (cp_default_arg_entry, unparsed_funs_with_default_args,
			ix, e)
	{
	  decl = e->decl;
	  /* If there are default arguments that have not yet been processed,
	     take care of them now.  */
	  if (class_type != e->class_type)
	    {
	      if (pushed_scope)
		pop_scope (pushed_scope);
	      class_type = e->class_type;
	      pushed_scope = push_scope (class_type);
	    }
	  /* Make sure that any template parameters are in scope.  */
	  maybe_begin_member_template_processing (decl);
	  /* Parse the default argument expressions.  */
	  cp_parser_late_parsing_default_args (parser, decl);
	  /* Remove any template parameters from the symbol table.  */
	  maybe_end_member_template_processing ();
	}
      VEC_truncate (cp_default_arg_entry, unparsed_funs_with_default_args, 0);
      /* Now parse any NSDMIs.  */
      save_ccp = current_class_ptr;
      save_ccr = current_class_ref;
      FOR_EACH_VEC_ELT (tree, unparsed_nsdmis, ix, decl)
	{
	  if (class_type != DECL_CONTEXT (decl))
	    {
	      if (pushed_scope)
		pop_scope (pushed_scope);
	      class_type = DECL_CONTEXT (decl);
	      pushed_scope = push_scope (class_type);
	    }
	  inject_this_parameter (class_type, TYPE_UNQUALIFIED);
	  cp_parser_late_parsing_nsdmi (parser, decl);
	}
      VEC_truncate (tree, unparsed_nsdmis, 0);
      current_class_ptr = save_ccp;
      current_class_ref = save_ccr;
      if (pushed_scope)
	pop_scope (pushed_scope);
      /* Now parse the body of the functions.  */
      FOR_EACH_VEC_ELT (tree, unparsed_funs_with_definitions, ix, decl)
	cp_parser_late_parsing_for_member (parser, decl);
      VEC_truncate (tree, unparsed_funs_with_definitions, 0);
    }

  /* Put back any saved access checks.  */
  pop_deferring_access_checks ();

  /* Restore saved state.  */
  parser->in_switch_statement_p = in_switch_statement_p;
  parser->in_statement = in_statement;
  parser->in_function_body = saved_in_function_body;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return type;
}

/* Timing wrapper around cp_parser_class_specifier_1; attributes
   class-body parse time to TV_PARSE_STRUCT.  */

static tree
cp_parser_class_specifier (cp_parser* parser)
{
  tree ret;
  timevar_push (TV_PARSE_STRUCT);
  ret = cp_parser_class_specifier_1 (parser);
  timevar_pop (TV_PARSE_STRUCT);
  return ret;
}

/* Parse a class-head.

   class-head:
     class-key identifier [opt] base-clause [opt]
     class-key nested-name-specifier identifier class-virt-specifier [opt]
       base-clause [opt]
     class-key nested-name-specifier [opt] template-id
       base-clause [opt]

   class-virt-specifier:
     final

   GNU Extensions:
     class-key attributes identifier [opt] base-clause [opt]
     class-key attributes nested-name-specifier identifier base-clause [opt]
     class-key attributes nested-name-specifier [opt] template-id
       base-clause [opt]

   Upon return BASES is initialized to the list of base classes (or
   NULL, if there are none) in the same form returned by
   cp_parser_base_clause.
   Returns the TYPE of the indicated class.  Sets
   *NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions
   involving a nested-name-specifier was used, and FALSE otherwise.

   Returns error_mark_node if this is not a class-head.

   Returns NULL_TREE if the class-head is syntactically valid, but
   semantically invalid in a way that means we should skip the entire
   body of the class.  */

static tree
cp_parser_class_head (cp_parser* parser,
		      bool* nested_name_specifier_p)
{
  tree nested_name_specifier;
  enum tag_types class_key;
  tree id = NULL_TREE;
  tree type = NULL_TREE;
  tree attributes;
  tree bases;
  cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED;
  bool template_id_p = false;
  bool qualified_p = false;
  bool invalid_nested_name_p = false;
  bool invalid_explicit_specialization_p = false;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
  tree pushed_scope = NULL_TREE;
  unsigned num_templates;
  cp_token *type_start_token = NULL, *nested_name_specifier_token_start = NULL;
  /* Assume no nested-name-specifier will be present.  */
  *nested_name_specifier_p = false;
  /* Assume no template parameter lists will be used in defining the
     type.  */
  num_templates = 0;
  parser->colon_corrects_to_scope_p = false;

  /* Look for the class-key.  */
  class_key = cp_parser_class_key (parser);
  if (class_key == none_type)
    return error_mark_node;

  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);

  /* If the next token is `::', that is invalid -- but sometimes
     people do try to write:

       struct ::S {};

     Handle this gracefully by accepting the extra qualifier, and then
     issuing an error about it later if this really is a class-head.
     If it turns out just to be an elaborated type specifier, remain
     silent.  */
  if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false))
    qualified_p = true;

  push_deferring_access_checks (dk_no_check);

  /* Determine the name of the class.  Begin by looking for an
     optional nested-name-specifier.  */
  nested_name_specifier_token_start = cp_lexer_peek_token (parser->lexer);
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
					   /*typename_keyword_p=*/false,
					   /*check_dependency_p=*/false,
					   /*type_p=*/false,
					   /*is_declaration=*/false);
  /* If there was a nested-name-specifier, then there *must* be an
     identifier.  */
  if (nested_name_specifier)
    {
      type_start_token = cp_lexer_peek_token (parser->lexer);
      /* Although the grammar says `identifier', it really means
	 `class-name' or `template-name'.  You are only allowed to
	 define a class that has already been declared with this
	 syntax.

	 The proposed resolution for Core Issue 180 says that wherever
	 you see `class T::X' you should treat `X' as a type-name.

	 It is OK to define an inaccessible class; for example:

	   class A { class B; };
	   class A::B {};

	 We do not know if we will see a class-name, or a
	 template-name.  We look for a class-name first, in case the
	 class-name is a template-id; if we looked for the
	 template-name first we would stop after the template-name.  */
      cp_parser_parse_tentatively (parser);
      type = cp_parser_class_name (parser,
				   /*typename_keyword_p=*/false,
				   /*template_keyword_p=*/false,
				   class_type,
				   /*check_dependency_p=*/false,
				   /*class_head_p=*/true,
				   /*is_declaration=*/false);
      /* If that didn't work, ignore the nested-name-specifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  invalid_nested_name_p = true;
	  type_start_token = cp_lexer_peek_token (parser->lexer);
	  id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    id = NULL_TREE;
	}
      /* If we could not find a corresponding TYPE, treat this
	 declaration like an unqualified declaration.  */
      if (type == error_mark_node)
	nested_name_specifier = NULL_TREE;
      /* Otherwise, count the number of templates used in TYPE and its
	 containing scopes.  */
      else
	{
	  tree scope;

	  for (scope = TREE_TYPE (type);
	       scope && TREE_CODE (scope) != NAMESPACE_DECL;
	       scope = (TYPE_P (scope)
			? TYPE_CONTEXT (scope)
			: DECL_CONTEXT (scope)))
	    if (TYPE_P (scope)
		&& CLASS_TYPE_P (scope)
		&& CLASSTYPE_TEMPLATE_INFO (scope)
		&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))
		&& !CLASSTYPE_TEMPLATE_SPECIALIZATION (scope))
	      ++num_templates;
	}
    }
  /* Otherwise, the identifier is optional.  */
  else
    {
      /* We don't know whether what comes next is a template-id,
	 an identifier, or nothing at all.  */
      cp_parser_parse_tentatively (parser);
      /* Check for a template-id.  */
      type_start_token = cp_lexer_peek_token (parser->lexer);
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  /*is_declaration=*/true);
      /* If that didn't work, it could still be an identifier.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	    {
	      type_start_token = cp_lexer_peek_token (parser->lexer);
	      id = cp_parser_identifier (parser);
	    }
	  else
	    id = NULL_TREE;
	}
      else
	{
	  template_id_p = true;
	  ++num_templates;
	}
    }

  pop_deferring_access_checks ();

  if (id)
    {
      cp_parser_check_for_invalid_template_id (parser, id,
					       type_start_token->location);
    }
  virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);

  /* If it's not a `:' or a `{' then we can't really be looking at a
     class-head, since a class-head only appears as part of a
     class-specifier.  We have to detect this situation before calling
     xref_tag, since that has irreversible side-effects.  */
  if (!cp_parser_next_token_starts_class_definition_p (parser))
    {
      cp_parser_error (parser, "expected %<{%> or %<:%>");
      type = error_mark_node;
      goto out;
    }

  /* At this point, we're going ahead with the class-specifier, even
     if some other problem occurs.  */
  cp_parser_commit_to_tentative_parse (parser);
  if (virt_specifiers & VIRT_SPEC_OVERRIDE)
    {
      cp_parser_error (parser,
		       "cannot specify %<override%> for a class");
      type = error_mark_node;
      goto out;
    }
  /* Issue the error about the overly-qualified name now.  */
  if (qualified_p)
    {
      cp_parser_error (parser,
		       "global qualification of class name is invalid");
      type = error_mark_node;
      goto out;
    }
  else if (invalid_nested_name_p)
    {
      cp_parser_error (parser,
		       "qualified name does not name a class");
      type = error_mark_node;
      goto out;
    }
  else if (nested_name_specifier)
    {
      tree scope;

      /* Reject typedef-names in class heads.  */
      if (!DECL_IMPLICIT_TYPEDEF_P (type))
	{
	  error_at (type_start_token->location,
		    "invalid class name in declaration of %qD",
		    type);
	  type = NULL_TREE;
	  goto done;
	}

      /* Figure out in what scope the declaration is being placed.  */
      scope = current_scope ();
      /* If that scope does not contain the scope in which the
	 class was originally declared, the program is invalid.  */
      if (scope && !is_ancestor (scope, nested_name_specifier))
	{
	  if (at_namespace_scope_p ())
	    error_at (type_start_token->location,
		      "declaration of %qD in namespace %qD which does not "
		      "enclose %qD",
		      type, scope, nested_name_specifier);
	  else
	    error_at (type_start_token->location,
		      "declaration of %qD in %qD which does not enclose %qD",
		      type, scope, nested_name_specifier);
	  type = NULL_TREE;
	  goto done;
	}
      /* [dcl.meaning]

	 A declarator-id shall not be qualified except for the
	 definition of a ... nested class outside of its class
	 ... [or] the definition or explicit instantiation of a
	 class member of a namespace outside of its namespace.  */
      if (scope == nested_name_specifier)
	{
	  permerror (nested_name_specifier_token_start->location,
		     "extra qualification not allowed");
	  nested_name_specifier = NULL_TREE;
	  num_templates = 0;
	}
    }
  /* An explicit-specialization must be preceded by "template <>".  If
     it is not, try to recover gracefully.  */
  if (at_namespace_scope_p ()
      && parser->num_template_parameter_lists == 0
      && template_id_p)
    {
      error_at (type_start_token->location,
		"an explicit specialization must be preceded by %<template <>%>");
      invalid_explicit_specialization_p = true;
      /* Take the same action that would have been taken by
	 cp_parser_explicit_specialization.  */
      ++parser->num_template_parameter_lists;
      begin_specialization ();
    }
  /* There must be no "return" statements between this point and the
     end of this function; set "type" to the correct return value and
     use "goto done;" to return.  */
  /* Make sure that the right number of template parameters were
     present.  */
  if (!cp_parser_check_template_parameters (parser, num_templates,
					    type_start_token->location,
					    /*declarator=*/NULL))
    {
      /* If something went wrong, there is no point in even trying to
	 process the class-definition.  */
      type = NULL_TREE;
      goto done;
    }

  /* Look up the type.  */
  if (template_id_p)
    {
      if (TREE_CODE (id) == TEMPLATE_ID_EXPR
	  && (DECL_FUNCTION_TEMPLATE_P (TREE_OPERAND (id, 0))
	      || TREE_CODE (TREE_OPERAND (id, 0)) == OVERLOAD))
	{
	  error_at (type_start_token->location,
		    "function template %qD redeclared as a class template",
		    id);
	  type = error_mark_node;
	}
      else
	{
	  type = TREE_TYPE (id);
	  type = maybe_process_partial_specialization (type);
	}
      if (nested_name_specifier)
	pushed_scope = push_scope (nested_name_specifier);
    }
  else if (nested_name_specifier)
    {
      tree class_type;

      /* Given:

	    template <typename T> struct S { struct T };
	    template <typename T> struct S<T>::T { };

	 we will get a TYPENAME_TYPE when processing the definition of
	 `S::T'.  We need to resolve it to the actual type before we
	 try to define it.  */
      if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE)
	{
	  class_type = resolve_typename_type (TREE_TYPE (type),
					      /*only_current_p=*/false);
	  if (TREE_CODE (class_type) != TYPENAME_TYPE)
	    type = TYPE_NAME (class_type);
	  else
	    {
	      cp_parser_error (parser, "could not resolve typename type");
	      type = error_mark_node;
	    }
	}

      if (maybe_process_partial_specialization (TREE_TYPE (type))
	  == error_mark_node)
	{
	  type = NULL_TREE;
	  goto done;
	}

      class_type = current_class_type;
      /* Enter the scope indicated by the nested-name-specifier.  */
      pushed_scope = push_scope (nested_name_specifier);
      /* Get the canonical version of this type.  */
      type = TYPE_MAIN_DECL (TREE_TYPE (type));
      if (PROCESSING_REAL_TEMPLATE_DECL_P ()
	  && !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type)))
	{
	  type = push_template_decl (type);
	  if (type == error_mark_node)
	    {
	      type = NULL_TREE;
	      goto done;
	    }
	}

      type = TREE_TYPE (type);
      *nested_name_specifier_p = true;
    }
  else
    /* The name is not a nested name.  */
    {
      /* If the class was unnamed, create a dummy name.  */
      if (!id)
	id = make_anon_name ();
      type = xref_tag (class_key, id, /*tag_scope=*/ts_current,
		       parser->num_template_parameter_lists);
    }

  /* Indicate whether this class was declared as a `class' or as a
     `struct'.  */
  if (TREE_CODE (type) == RECORD_TYPE)
    CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type);
  cp_parser_check_class_key (class_key, type);

  /* If this type was already complete, and we see another definition,
     that's an error.  */
  if (type != error_mark_node && COMPLETE_TYPE_P (type))
    {
      error_at (type_start_token->location, "redefinition of %q#T",
		type);
      error_at (type_start_token->location, "previous definition of %q+#T",
		type);
      type = NULL_TREE;
      goto done;
    }
  else if (type == error_mark_node)
    type = NULL_TREE;

  if (type)
    {
      /* Apply attributes now, before any use of the class as a template
	 argument in its base list.  */
      cplus_decl_attributes (&type, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE);
      fixup_attribute_variants (type);
    }

  /* We will have entered the scope containing the class; the names of
     base classes should be looked up in that context.  For example:

       struct A { struct B {}; struct C; };
       struct A::C : B {};

     is valid.  */

  /* Get the list of base-classes, if there is one.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    bases = cp_parser_base_clause (parser);
  else
    bases = NULL_TREE;

  /* If we're really defining a class, process the base classes.
     If they're invalid, fail.  */
  if (type && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
      && !xref_basetypes (type, bases))
    type = NULL_TREE;

 done:
  /* Leave the scope given by the nested-name-specifier.  We will
     enter the class scope itself while processing the members.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  if (invalid_explicit_specialization_p)
    {
      end_specialization ();
      --parser->num_template_parameter_lists;
    }

  if (type)
    DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
  if (type && (virt_specifiers & VIRT_SPEC_FINAL))
    CLASSTYPE_FINAL (type) = 1;
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}

/* Parse a class-key.

   class-key:
     class
     struct
     union

   Returns the kind of class-key specified, or none_type to indicate
   error.  */

static enum tag_types
cp_parser_class_key (cp_parser* parser)
{
  cp_token *token;
  enum tag_types tag_type;

  /* Look for the class-key.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_KEY);
  if (!token)
    return none_type;

  /* Check to see if the TOKEN is a class-key.  */
  tag_type = cp_parser_token_is_class_key (token);
  if (!tag_type)
    cp_parser_error (parser, "expected class-key");
  return tag_type;
}

/* Parse an (optional) member-specification.

   member-specification:
     member-declaration member-specification [opt]
     access-specifier : member-specification [opt]  */

static void
cp_parser_member_specification_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;
      enum rid keyword;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `}', or EOF then we've seen all the members.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	break;

      /* See if this token is a keyword.  */
      keyword = token->keyword;
      switch (keyword)
	{
	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Remember which access-specifier is active.  */
	  current_access_specifier = token->u.value;
	  /* Look for the `:'.  */
	  cp_parser_require (parser, CPP_COLON, RT_COLON);
	  break;

	default:
	  /* Accept #pragmas at class scope.
*/
	  if (token->type == CPP_PRAGMA)
	    {
	      cp_parser_pragma (parser, pragma_external);
	      break;
	    }

	  /* Otherwise, the next construction must be a
	     member-declaration.  */
	  cp_parser_member_declaration (parser);
	}
    }
}

/* Parse a member-declaration.

   member-declaration:
     decl-specifier-seq [opt] member-declarator-list [opt] ;
     function-definition ; [opt]
     :: [opt] nested-name-specifier template [opt] unqualified-id ;
     using-declaration
     template-declaration
     alias-declaration

   member-declarator-list:
     member-declarator
     member-declarator-list , member-declarator

   member-declarator:
     declarator pure-specifier [opt]
     declarator constant-initializer [opt]
     identifier [opt] : constant-expression

   GNU Extensions:

   member-declaration:
     __extension__ member-declaration

   member-declarator:
     declarator attributes [opt] pure-specifier [opt]
     declarator attributes [opt] constant-initializer [opt]
     identifier [opt] attributes [opt] : constant-expression

   C++0x Extensions:

   member-declaration:
     static_assert-declaration  */

static void
cp_parser_member_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq decl_specifiers;
  tree prefix_attributes;
  tree decl;
  int declares_class_or_enum;
  bool friend_p;
  cp_token *token = NULL;
  cp_token *decl_spec_token_start = NULL;
  cp_token *initializer_token_start = NULL;
  int saved_pedantic;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Recurse.  */
      cp_parser_member_declaration (parser);
      /* Restore the old value of the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Check for a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* An explicit specialization here is an error condition, and we
	 expect the specialization handler to detect and report this.  */
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      else
	cp_parser_template_declaration (parser, /*member_p=*/true);

      return;
    }

  /* Check for a using-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    {
      if (cxx_dialect < cxx0x)
	{
	  /* Parse the using-declaration.  */
	  cp_parser_using_declaration (parser,
				       /*access_declaration_p=*/false);
	  return;
	}
      else
	{
	  /* In C++11 a `using' at class scope may also be an
	     alias-declaration; try that first, tentatively.  */
	  tree decl;
	  cp_parser_parse_tentatively (parser);
	  decl = cp_parser_alias_declaration (parser);
	  if (cp_parser_parse_definitely (parser))
	    finish_member_declaration (decl);
	  else
	    cp_parser_using_declaration (parser,
					 /*access_declaration_p=*/false);
	  return;
	}
    }

  /* Check for @defs (Objective-C++).  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS))
    {
      tree ivar, member;
      tree ivar_chains = cp_parser_objc_defs_expression (parser);
      ivar = ivar_chains;
      while (ivar)
	{
	  member = ivar;
	  ivar = TREE_CHAIN (member);
	  TREE_CHAIN (member) = NULL_TREE;
	  finish_member_declaration (member);
	}
      return;
    }

  /* If the next token is `static_assert' we have a static assertion.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC_ASSERT))
    {
      cp_parser_static_assert (parser, /*member_p=*/true);
      return;
    }

  parser->colon_corrects_to_scope_p = false;

  if (cp_parser_using_declaration (parser, /*access_declaration=*/true))
    goto out;

  /* Parse the decl-specifier-seq.  */
  decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  prefix_attributes = decl_specifiers.attributes;
  decl_specifiers.attributes = NULL_TREE;
  /* Check for an invalid type-name.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    goto out;
  /* If there is no declarator, then the decl-specifier-seq should
     specify a type.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      /* If there was no decl-specifier-seq, and the next token is a
	 `;', then we have something like:

	   struct S { ; };

	 [class.mem]

	 Each member-declaration shall declare at least one member
	 name of the class.  */
      if (!decl_specifiers.any_specifiers_p)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  if (!in_system_header_at (token->location))
	    pedwarn (token->location, OPT_pedantic, "extra %<;%>");
	}
      else
	{
	  tree type;

	  /* See if this declaration is a friend.  */
	  friend_p = cp_parser_friend_p (&decl_specifiers);
	  /* If there were decl-specifiers, check to see if there was
	     a class-declaration.  */
	  type = check_tag_decl (&decl_specifiers);
	  /* Nested classes have already been added to the class, but
	     a `friend' needs to be explicitly registered.  */
	  if (friend_p)
	    {
	      /* If the `friend' keyword was present, the friend must
		 be introduced with a class-key.  */
	       if (!declares_class_or_enum && cxx_dialect < cxx0x)
		 pedwarn (decl_spec_token_start->location, OPT_pedantic,
			  "in C++03 a class-key must be used "
			  "when declaring a friend");
	       /* In this case:

		    template <typename T> struct A {
		      friend struct A<T>::B;
		    };

		  A<T>::B will be represented by a TYPENAME_TYPE, and
		  therefore not recognized by check_tag_decl.  */
	       if (!type)
		 {
		   type = decl_specifiers.type;
		   if (type && TREE_CODE (type) == TYPE_DECL)
		     type = TREE_TYPE (type);
		 }
	       if (!type || !TYPE_P (type))
		 error_at (decl_spec_token_start->location,
			   "friend declaration does not name a class or "
			   "function");
	       else
		 make_friend_class (current_class_type, type,
				    /*complain=*/true);
	    }
	  /* If there is no TYPE, an error message will already have
	     been issued.  */
	  else if (!type || type == error_mark_node)
	    ;
	  /* An anonymous aggregate has to be handled specially; such
	     a declaration really declares a data member (with a
	     particular type), as opposed to a nested class.  */
	  else if (ANON_AGGR_TYPE_P (type))
	    {
	      /* Remove constructors and such from TYPE, now that we
		 know it is an anonymous aggregate.  */
	      fixup_anonymous_aggr (type);
	      /* And make the corresponding data member.  */
	      decl = build_decl (decl_spec_token_start->location,
				 FIELD_DECL, NULL_TREE, type);
	      /* Add it to the class.  */
	      finish_member_declaration (decl);
	    }
	  else
	    cp_parser_check_access_in_redeclaration
					      (TYPE_NAME (type),
					       decl_spec_token_start->location);
	}
    }
  else
    {
      bool assume_semicolon = false;

      /* See if these declarations will be friends.  */
      friend_p = cp_parser_friend_p (&decl_specifiers);

      /* Keep going until we hit the `;' at the end of the
	 declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree attributes = NULL_TREE;
	  tree first_attribute;

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);

	  /* Check for a bitfield declaration.  */
	  if (token->type == CPP_COLON
	      || (token->type == CPP_NAME
		  && cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_COLON))
	    {
	      tree identifier;
	      tree width;

	      /* Get the name of the bitfield.  Note that we cannot just
		 check TOKEN here because it may have been invalidated by
		 the call to cp_lexer_peek_nth_token above.  */
	      if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
		identifier = cp_parser_identifier (parser);
	      else
		identifier = NULL_TREE;

	      /* Consume the `:' token.  */
	      cp_lexer_consume_token (parser->lexer);
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/false,
						 NULL);

	      /* Look for attributes that apply to the bitfield.  */
	      attributes = cp_parser_attributes_opt (parser);
	      /* Remember which attributes are prefix attributes and
		 which are not.  */
	      first_attribute = attributes;
	      /* Combine the attributes.  */
	      attributes = chainon (prefix_attributes, attributes);

	      /* Create the bitfield declaration.  */
	      decl = grokbitfield (identifier
				   ? make_id_declarator (NULL_TREE,
							 identifier,
							 sfk_none)
				   : NULL,
				   &decl_specifiers,
				   width,
				   attributes);
	    }
	  else
	    {
	      cp_declarator *declarator;
	      tree initializer;
	      tree asm_specification;
	      int ctor_dtor_or_conv_p;

	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/true);

	      /* If something went wrong parsing the declarator, make sure
		 that we at least consume some tokens.  */
	      if (declarator == cp_error_declarator)
		{
		  /* Skip to the end of the statement.  */
		  cp_parser_skip_to_end_of_statement (parser);
		  /* If the next token is not a semicolon, that is
		     probably because we just skipped over the body of
		     a function.  So, we consume a semicolon if
		     present, but do not issue an error message if it
		     is not present.  */
		  if (cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
		    cp_lexer_consume_token (parser->lexer);
		  goto out;
		}

	      if (declares_class_or_enum & 2)
		cp_parser_check_for_definition_in_return_type
					    (declarator, decl_specifiers.type,
					     decl_specifiers.type_location);

	      /* Look for an asm-specification.  */
	      asm_specification = cp_parser_asm_specification_opt (parser);
	      /* Look for attributes that apply to the declaration.  */
	      attributes = cp_parser_attributes_opt (parser);
	      /* Remember which attributes are prefix attributes and
		 which are not.  */
	      first_attribute = attributes;
	      /* Combine the attributes.  */
	      attributes = chainon (prefix_attributes, attributes);

	      /* If it's an `=', then we have a constant-initializer or a
		 pure-specifier.  It is not correct to parse the
		 initializer before registering the member declaration
		 since the member declaration should be in scope while
		 its initializer is processed.  However, the rest of the
		 front end does not yet provide an interface that allows
		 us to handle this correctly.  */
	      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
		{
		  /* In [class.mem]:

		     A pure-specifier shall be used only in the declaration of
		     a virtual function.

		     A member-declarator can contain a constant-initializer
		     only if it declares a static member of integral or
		     enumeration type.

		     Therefore, if the DECLARATOR is for a function, we look
		     for a pure-specifier; otherwise, we look for a
		     constant-initializer.  When we call `grokfield', it will
		     perform more stringent semantics checks.  */
		  initializer_token_start = cp_lexer_peek_token (parser->lexer);
		  if (function_declarator_p (declarator)
		      || (decl_specifiers.type
			  && TREE_CODE (decl_specifiers.type) == TYPE_DECL
			  && (TREE_CODE (TREE_TYPE (decl_specifiers.type))
			      == FUNCTION_TYPE)))
		    initializer = cp_parser_pure_specifier (parser);
		  else if (decl_specifiers.storage_class != sc_static)
		    initializer = cp_parser_save_nsdmi (parser);
		  else if (cxx_dialect >= cxx0x)
		    {
		      bool nonconst;
		      /* Don't require a constant rvalue in C++11, since we
			 might want a reference constant.  We'll enforce
			 constancy later.  */
		      cp_lexer_consume_token (parser->lexer);
		      /* Parse the initializer.  */
		      initializer = cp_parser_initializer_clause (parser,
								  &nonconst);
		    }
		  else
		    /* Parse the initializer.  */
		    initializer = cp_parser_constant_initializer (parser);
		}
	      else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
		       && !function_declarator_p (declarator))
		{
		  /* A brace-enclosed NSDMI, e.g. `int i {0};'.  */
		  bool x;
		  if (decl_specifiers.storage_class != sc_static)
		    initializer = cp_parser_save_nsdmi (parser);
		  else
		    initializer = cp_parser_initializer (parser, &x, &x);
		}
	      /* Otherwise, there is no initializer.  */
	      else
		initializer = NULL_TREE;

	      /* See if we are probably looking at a function
		 definition.  We are certainly not looking at a
		 member-declarator.  Calling `grokfield' has
		 side-effects, so we must not do it unless we are sure
		 that we are looking at a member-declarator.  */
	      if (cp_parser_token_starts_function_definition_p
		  (cp_lexer_peek_token (parser->lexer)))
		{
		  /* The grammar does not allow a pure-specifier to be
		     used when a member function is defined.  (It is
		     possible that this fact is an oversight in the
		     standard, since a pure function may be defined
		     outside of the class-specifier.)  */
		  if (initializer && initializer_token_start)
		    error_at (initializer_token_start->location,
			      "pure-specifier on function-definition");
		  decl = cp_parser_save_member_function_body (parser,
							      &decl_specifiers,
							      declarator,
							      attributes);
		  /* If the member was not a friend, declare it here.  */
		  if (!friend_p)
		    finish_member_declaration (decl);
		  /* Peek at the next token.  */
		  token = cp_lexer_peek_token (parser->lexer);
		  /* If the next token is a semicolon, consume it.  */
		  if (token->type == CPP_SEMICOLON)
		    cp_lexer_consume_token (parser->lexer);
		  goto out;
		}
	      else if (declarator->kind == cdk_function)
		declarator->id_loc = token->location;
	      /* Create the declaration.  */
	      decl = grokfield (declarator, &decl_specifiers,
				initializer, /*init_const_expr_p=*/true,
				asm_specification,
				attributes);
	    }

	  /* Reset PREFIX_ATTRIBUTES.  */
	  while (attributes
		 && TREE_CHAIN (attributes) != first_attribute)
	    attributes = TREE_CHAIN (attributes);
	  if (attributes)
	    TREE_CHAIN (attributes) = NULL_TREE;

	  /* If there is any qualification still in effect, clear it
	     now; we will be starting fresh with the next declarator.  */
	  parser->scope = NULL_TREE;
	  parser->qualifying_scope = NULL_TREE;
	  parser->object_scope = NULL_TREE;
	  /* If it's a `,', then there are more declarators.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  /* If the next token isn't a `;', then we have a parse error.  */
	  else if (cp_lexer_next_token_is_not (parser->lexer,
					       CPP_SEMICOLON))
	    {
	      /* The next token might be a ways away from where the
		 actual semicolon is missing.  Find the previous token
		 and use that for our error position.  */
	      cp_token *token = cp_lexer_previous_token (parser->lexer);
	      error_at (token->location,
			"expected %<;%> at end of member declaration");

	      /* Assume that the user meant to provide a semicolon.  If
		 we were to cp_parser_skip_to_end_of_statement, we might
		 skip to a semicolon inside a member function definition
		 and issue nonsensical error messages.  */
	      assume_semicolon = true;
	    }

	  if (decl)
	    {
	      /* Add DECL to the list of members.  */
	      if (!friend_p)
		finish_member_declaration (decl);

	      if (TREE_CODE (decl) == FUNCTION_DECL)
		cp_parser_save_default_args (parser, decl);
	      else if (TREE_CODE (decl) == FIELD_DECL
		       && !DECL_C_BIT_FIELD (decl)
		       && DECL_INITIAL (decl))
		/* Add DECL to the queue of NSDMI to be parsed later.  */
		VEC_safe_push (tree, gc, unparsed_nsdmis, decl);
	    }

	  if (assume_semicolon)
	    goto out;
	}
    }

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}

/* Parse a pure-specifier.

   pure-specifier:
     = 0

   Returns INTEGER_ZERO_NODE if a pure specifier is found.
   Otherwise, ERROR_MARK_NODE is returned.  */

static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;

  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* Look for the `0' token.  */
  token = cp_lexer_peek_token (parser->lexer);

  if (token->type == CPP_EOF
      || token->type == CPP_PRAGMA_EOL)
    return error_mark_node;

  cp_lexer_consume_token (parser->lexer);

  /* Accept = default or = delete in c++0x mode.  */
  if (token->keyword == RID_DEFAULT
      || token->keyword == RID_DELETE)
    {
      maybe_warn_cpp0x (CPP0X_DEFAULTED_DELETED);
      return token->u.value;
    }

  /* c_lex_with_flags marks a single digit '0' with PURE_ZERO.  */
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
		       "invalid pure specifier (only %<= 0%> is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error_at (token->location, "templates may not be %<virtual%>");
      return error_mark_node;
    }

  return integer_zero_node;
}

/* Parse a constant-initializer.

   constant-initializer:
     = constant-expression

   Returns a representation of the constant-expression.  */

static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* Look for the `=' token.
*/
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;

  /* It is invalid to write:

       struct S { static const int i = { 7 }; };

     so reject a brace-enclosed initializer here, but skip over it so
     parsing can continue.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
		       "a brace-enclosed initializer is not allowed here");
      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);
      /* Skip the initializer.  */
      cp_parser_skip_to_closing_brace (parser);
      /* Look for the trailing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

      return error_mark_node;
    }

  return cp_parser_constant_expression (parser,
					/*allow_non_constant=*/false,
					NULL);
}

/* Derived classes [gram.class.derived] */

/* Parse a base-clause.

   base-clause:
     : base-specifier-list

   base-specifier-list:
     base-specifier ... [opt]
     base-specifier-list , base-specifier ... [opt]

   Returns a TREE_LIST representing the base-classes, in the order in
   which they were declared.  The representation of each node is as
   described by cp_parser_base_specifier.

   In the case that no bases are specified, this function will return
   NULL_TREE, not ERROR_MARK_NODE.  */

static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree bases = NULL_TREE;

  /* Look for the `:' that begins the list.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* Scan the base-specifier-list.  */
  while (true)
    {
      cp_token *token;
      tree base;
      bool pack_expansion_p = false;

      /* Look for the base-specifier.  */
      base = cp_parser_base_specifier (parser);
      /* Look for the (optional) ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
        {
          /* Consume the `...'.  */
          cp_lexer_consume_token (parser->lexer);

          pack_expansion_p = true;
        }

      /* Add BASE to the front of the list; the accumulated list is
	 restored to declaration order by nreverse before returning.  */
      if (base && base != error_mark_node)
	{
          if (pack_expansion_p)
            /* Make this a pack expansion type.  */
            TREE_VALUE (base) = make_pack_expansion (TREE_VALUE (base));

          if (!check_for_bare_parameter_packs (TREE_VALUE (base)))
            {
              TREE_CHAIN (base) = bases;
              bases = base;
            }
	}
      /* Peek at the next token.
*/ token = cp_lexer_peek_token (parser->lexer); /* If it's not a comma, then the list is complete. */ if (token->type != CPP_COMMA) break; /* Consume the `,'. */ cp_lexer_consume_token (parser->lexer); } /* PARSER->SCOPE may still be non-NULL at this point, if the last base class had a qualified name. However, the next name that appears is certainly not qualified. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; return nreverse (bases); } /* Parse a base-specifier. base-specifier: :: [opt] nested-name-specifier [opt] class-name virtual access-specifier [opt] :: [opt] nested-name-specifier [opt] class-name access-specifier virtual [opt] :: [opt] nested-name-specifier [opt] class-name Returns a TREE_LIST. The TREE_PURPOSE will be one of ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to indicate the specifiers provided. The TREE_VALUE will be a TYPE (or the ERROR_MARK_NODE) indicating the type that was specified. */ static tree cp_parser_base_specifier (cp_parser* parser) { cp_token *token; bool done = false; bool virtual_p = false; bool duplicate_virtual_error_issued_p = false; bool duplicate_access_error_issued_p = false; bool class_scope_p, template_p; tree access = access_default_node; tree type; /* Process the optional `virtual' and `access-specifier'. */ while (!done) { /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Process `virtual'. */ switch (token->keyword) { case RID_VIRTUAL: /* If `virtual' appears more than once, issue an error. */ if (virtual_p && !duplicate_virtual_error_issued_p) { cp_parser_error (parser, "%<virtual%> specified more than once in base-specified"); duplicate_virtual_error_issued_p = true; } virtual_p = true; /* Consume the `virtual' token. */ cp_lexer_consume_token (parser->lexer); break; case RID_PUBLIC: case RID_PROTECTED: case RID_PRIVATE: /* If more than one access specifier appears, issue an error. 
*/ if (access != access_default_node && !duplicate_access_error_issued_p) { cp_parser_error (parser, "more than one access specifier in base-specified"); duplicate_access_error_issued_p = true; } access = ridpointers[(int) token->keyword]; /* Consume the access-specifier. */ cp_lexer_consume_token (parser->lexer); break; default: done = true; break; } } /* It is not uncommon to see programs mechanically, erroneously, use the 'typename' keyword to denote (dependent) qualified types as base classes. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME)) { token = cp_lexer_peek_token (parser->lexer); if (!processing_template_decl) error_at (token->location, "keyword %<typename%> not allowed outside of templates"); else error_at (token->location, "keyword %<typename%> not allowed in this context " "(the base class is implicitly a type)"); cp_lexer_consume_token (parser->lexer); } /* Look for the optional `::' operator. */ cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false); /* Look for the nested-name-specifier. The simplest way to implement: [temp.res] The keyword `typename' is not permitted in a base-specifier or mem-initializer; in these contexts a qualified name that depends on a template-parameter is implicitly assumed to be a type name. is to pretend that we have seen the `typename' keyword at this point. */ cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/true, /*check_dependency_p=*/true, typename_type, /*is_declaration=*/true); /* If the base class is given by a qualified name, assume that names we see are type names or templates, as appropriate. */ class_scope_p = (parser->scope && TYPE_P (parser->scope)); template_p = class_scope_p && cp_parser_optional_template_keyword (parser); if (!parser->scope && cp_lexer_next_token_is_decltype (parser->lexer)) /* DR 950 allows decltype as a base-specifier. */ type = cp_parser_decltype (parser); else { /* Otherwise, look for the class-name. 
*/ type = cp_parser_class_name (parser, class_scope_p, template_p, typename_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/true); type = TREE_TYPE (type); } if (type == error_mark_node) return error_mark_node; return finish_base_specifier (type, access, virtual_p); } /* Exception handling [gram.exception] */ /* Parse an (optional) noexcept-specification. noexcept-specification: noexcept ( constant-expression ) [opt] If no noexcept-specification is present, returns NULL_TREE. Otherwise, if REQUIRE_CONSTEXPR is false, then either parse and return any expression if parentheses follow noexcept, or return BOOLEAN_TRUE_NODE if there are no parentheses. CONSUMED_EXPR will be set accordingly. Otherwise, returns a noexcept specification unless RETURN_COND is true, in which case a boolean condition is returned instead. */ static tree cp_parser_noexcept_specification_opt (cp_parser* parser, bool require_constexpr, bool* consumed_expr, bool return_cond) { cp_token *token; const char *saved_message; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Is it a noexcept-specification? */ if (cp_parser_is_keyword (token, RID_NOEXCEPT)) { tree expr; cp_lexer_consume_token (parser->lexer); if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN) { cp_lexer_consume_token (parser->lexer); if (require_constexpr) { /* Types may not be defined in an exception-specification. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in an exception-specification"); expr = cp_parser_constant_expression (parser, false, NULL); /* Restore the saved message. 
 */
	      parser->type_definition_forbidden_message = saved_message;
	    }
	  else
	    {
	      expr = cp_parser_expression (parser, false, NULL);
	      *consumed_expr = true;
	    }

	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	}
      else
	{
	  /* No parentheses: a bare `noexcept' means `noexcept(true)'.  */
	  expr = boolean_true_node;
	  if (!require_constexpr)
	    *consumed_expr = false;
	}

      /* We cannot build a noexcept-spec right away because this will check
	 that expr is a constexpr.  */
      if (!return_cond)
	return build_noexcept_spec (expr, tf_warning_or_error);
      else
	return expr;
    }
  else
    return NULL_TREE;
}

/* Parse an (optional) exception-specification.

   exception-specification:
     throw ( type-id-list [opt] )

   Returns a TREE_LIST representing the exception-specification.  The
   TREE_VALUE of each node is a type.  */

static tree
cp_parser_exception_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree type_id_list;
  const char *saved_message;

  /* Peek at the next token.  TOKEN is remembered across the noexcept
     check below, so it still designates the first token of the putative
     exception-specification.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Is it a noexcept-specification?  */
  type_id_list = cp_parser_noexcept_specification_opt(parser, true, NULL,
						      false);
  if (type_id_list != NULL_TREE)
    return type_id_list;

  /* If it's not `throw', then there's no exception-specification.  */
  if (!cp_parser_is_keyword (token, RID_THROW))
    return NULL_TREE;

#if 0
  /* Enable this once a lot of code has transitioned to noexcept?  */
  if (cxx_dialect == cxx0x && !in_system_header)
    warning (OPT_Wdeprecated, "dynamic exception specifications are "
	     "deprecated in C++0x; use %<noexcept%> instead");
#endif

  /* Consume the `throw'.  */
  cp_lexer_consume_token (parser->lexer);

  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's not a `)', then there is a type-id-list.  */
  if (token->type != CPP_CLOSE_PAREN)
    {
      /* Types may not be defined in an exception-specification.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in an exception-specification");
      /* Parse the type-id-list.  */
      type_id_list = cp_parser_type_id_list (parser);
      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;
    }
  else
    /* `throw ()' promises that nothing is thrown.  */
    type_id_list = empty_except_spec;

  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return type_id_list;
}

/* Parse an (optional) type-id-list.

   type-id-list:
     type-id ... [opt]
     type-id-list , type-id ... [opt]

   Returns a TREE_LIST.  The TREE_VALUE of each node is a TYPE,
   in the order that the types were presented.  */

static tree
cp_parser_type_id_list (cp_parser* parser)
{
  tree types = NULL_TREE;

  while (true)
    {
      cp_token *token;
      tree type;

      /* Get the next type-id.  */
      type = cp_parser_type_id (parser);
      /* Parse the optional ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);

	  /* Turn the type into a pack expansion expression.  */
	  type = make_pack_expansion (type);
	}
      /* Add it to the list.  */
      types = add_exception_specifier (types, type, /*complain=*/1);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it is not a `,', we are done.  */
      if (token->type != CPP_COMMA)
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* The list was built in reverse; restore source order.  */
  return nreverse (types);
}

/* Parse a try-block.

   try-block:
     try compound-statement handler-seq  */

static tree
cp_parser_try_block (cp_parser* parser)
{
  tree try_block;

  cp_parser_require_keyword (parser, RID_TRY, RT_TRY);
  try_block = begin_try_block ();
  cp_parser_compound_statement (parser, NULL, true, false);
  finish_try_block (try_block);
  cp_parser_handler_seq (parser);
  finish_handler_sequence (try_block);

  return try_block;
}

/* Parse a function-try-block.
   function-try-block:
     try ctor-initializer [opt] function-body handler-seq  */

static bool
cp_parser_function_try_block (cp_parser* parser)
{
  tree compound_stmt;
  tree try_block;
  bool ctor_initializer_p;

  /* Look for the `try' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_TRY, RT_TRY))
    return false;
  /* Let the rest of the front end know where we are.  */
  try_block = begin_function_try_block (&compound_stmt);
  /* Parse the function-body.  */
  ctor_initializer_p
    = cp_parser_ctor_initializer_opt_and_function_body (parser);
  /* We're done with the `try' part.  */
  finish_function_try_block (try_block);
  /* Parse the handlers.  */
  cp_parser_handler_seq (parser);
  /* We're done with the handlers.  */
  finish_function_handler_sequence (try_block, compound_stmt);

  /* Tell the caller whether a ctor-initializer was present.  */
  return ctor_initializer_p;
}

/* Parse a handler-seq.

   handler-seq:
     handler handler-seq [opt]  */

static void
cp_parser_handler_seq (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      /* Parse the handler.  */
      cp_parser_handler (parser);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not `catch' then there are no more handlers.  */
      if (!cp_parser_is_keyword (token, RID_CATCH))
	break;
    }
}

/* Parse a handler.

   handler:
     catch ( exception-declaration ) compound-statement  */

static void
cp_parser_handler (cp_parser* parser)
{
  tree handler;
  tree declaration;

  cp_parser_require_keyword (parser, RID_CATCH, RT_CATCH);
  handler = begin_handler ();
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  declaration = cp_parser_exception_declaration (parser);
  /* The handler parameter must be registered before the body is
     parsed so the body can refer to it.  */
  finish_handler_parms (declaration, handler);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  cp_parser_compound_statement (parser, NULL, false, false);
  finish_handler (handler);
}

/* Parse an exception-declaration.

   exception-declaration:
     type-specifier-seq declarator
     type-specifier-seq abstract-declarator
     type-specifier-seq
     ...

   Returns a VAR_DECL for the declaration, or NULL_TREE if the
   ellipsis variant is used.
 */

static tree
cp_parser_exception_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  const char *saved_message;

  /* If it's an ellipsis, it's easy to handle.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }

  /* Types may not be defined in exception-declarations.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in exception-declarations");

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
				/*is_trailing_return=*/false,
				&type_specifiers);
  /* If it's a `)', then there is no declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
    declarator = NULL;
  else
    declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
				       /*ctor_dtor_or_conv_p=*/NULL,
				       /*parenthesized_p=*/NULL,
				       /*member_p=*/false);

  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;

  if (!type_specifiers.any_specifiers_p)
    return error_mark_node;

  return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL);
}

/* Parse a throw-expression.

   throw-expression:
     throw assignment-expression [opt]

   Returns a THROW_EXPR representing the throw-expression.  */

static tree
cp_parser_throw_expression (cp_parser* parser)
{
  tree expression;
  cp_token* token;

  cp_parser_require_keyword (parser, RID_THROW, RT_THROW);
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out whether or not there is an assignment-expression
     following the "throw" keyword.  A token that can only end the
     enclosing expression or statement means a re-throw.  */
  if (token->type == CPP_COMMA
      || token->type == CPP_SEMICOLON
      || token->type == CPP_CLOSE_PAREN
      || token->type == CPP_CLOSE_SQUARE
      || token->type == CPP_CLOSE_BRACE
      || token->type == CPP_COLON)
    expression = NULL_TREE;
  else
    expression = cp_parser_assignment_expression (parser,
						  /*cast_p=*/false, NULL);

  return build_throw (expression);
}

/* GNU Extensions */

/* Parse an (optional) asm-specification.

   asm-specification:
     asm ( string-literal )

   If the asm-specification is present, returns a STRING_CST
   corresponding to the string-literal.  Otherwise, returns
   NULL_TREE.  */

static tree
cp_parser_asm_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree asm_specification;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If the next token isn't the `asm' keyword, then there's no
     asm-specification.  */
  if (!cp_parser_is_keyword (token, RID_ASM))
    return NULL_TREE;

  /* Consume the `asm' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Look for the string-literal.  */
  asm_specification = cp_parser_string_literal (parser, false, false);

  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return asm_specification;
}

/* Parse an asm-operand-list.

   asm-operand-list:
     asm-operand
     asm-operand-list , asm-operand

   asm-operand:
     string-literal ( expression )
     [ string-literal ] string-literal ( expression )

   Returns a TREE_LIST representing the operands.  The TREE_VALUE of
   each node is the expression.  The TREE_PURPOSE is itself a
   TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed
   string-literal (or NULL_TREE if not present) and whose TREE_VALUE
   is a STRING_CST for the string literal before the parenthesis.

   Returns ERROR_MARK_NODE if any of the operands are invalid.
 */

static tree
cp_parser_asm_operand_list (cp_parser* parser)
{
  tree asm_operands = NULL_TREE;
  bool invalid_operands = false;

  while (true)
    {
      tree string_literal;
      tree expression;
      tree name;

      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* Consume the `[' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Read the operand name.  */
	  name = cp_parser_identifier (parser);
	  if (name != error_mark_node)
	    name = build_string (IDENTIFIER_LENGTH (name),
				 IDENTIFIER_POINTER (name));
	  /* Look for the closing `]'.  */
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	}
      else
	name = NULL_TREE;
      /* Look for the string-literal.  */
      string_literal = cp_parser_string_literal (parser, false, false);

      /* Look for the `('.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      /* Parse the expression.  */
      expression = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      /* Look for the `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      /* Keep parsing even after an invalid operand so that later
	 operands are still diagnosed; remember the failure.  */
      if (name == error_mark_node
	  || string_literal == error_mark_node
	  || expression == error_mark_node)
	invalid_operands = true;

      /* Add this operand to the list.  */
      asm_operands = tree_cons (build_tree_list (name, string_literal),
				expression,
				asm_operands);
      /* If the next token is not a `,', there are no more
	 operands.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return invalid_operands ? error_mark_node : nreverse (asm_operands);
}

/* Parse an asm-clobber-list.

   asm-clobber-list:
     string-literal
     asm-clobber-list , string-literal

   Returns a TREE_LIST, indicating the clobbers in the order that they
   appeared.  The TREE_VALUE of each node is a STRING_CST.  */

static tree
cp_parser_asm_clobber_list (cp_parser* parser)
{
  tree clobbers = NULL_TREE;

  while (true)
    {
      tree string_literal;

      /* Look for the string literal.  */
      string_literal = cp_parser_string_literal (parser, false, false);
      /* Add it to the list.  */
      clobbers = tree_cons (NULL_TREE, string_literal, clobbers);
      /* If the next token is not a `,', then the list is
	 complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return clobbers;
}

/* Parse an asm-label-list.

   asm-label-list:
     identifier
     asm-label-list , identifier

   Returns a TREE_LIST, indicating the labels in the order that they
   appeared.  The TREE_VALUE of each node is a label.  */

static tree
cp_parser_asm_label_list (cp_parser* parser)
{
  tree labels = NULL_TREE;

  while (true)
    {
      tree identifier, label, name;

      /* Look for the identifier.  */
      identifier = cp_parser_identifier (parser);
      if (!error_operand_p (identifier))
	{
	  label = lookup_label (identifier);
	  if (TREE_CODE (label) == LABEL_DECL)
	    {
	      /* Mark the label as used so it is not warned about,
		 and validate the goto target.  */
	      TREE_USED (label) = 1;
	      check_goto (label);
	      name = build_string (IDENTIFIER_LENGTH (identifier),
				   IDENTIFIER_POINTER (identifier));
	      labels = tree_cons (name, label, labels);
	    }
	}
      /* If the next token is not a `,', then the list is
	 complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return nreverse (labels);
}

/* Parse an (optional) series of attributes.

   attributes:
     attributes attribute

   attribute:
     __attribute__ (( attribute-list [opt] ))

   The return value is as for cp_parser_attribute_list.  */

static tree
cp_parser_attributes_opt (cp_parser* parser)
{
  tree attributes = NULL_TREE;

  while (true)
    {
      cp_token *token;
      tree attribute_list;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not `__attribute__', then we're done.  */
      if (token->keyword != RID_ATTRIBUTE)
	break;

      /* Consume the `__attribute__' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the two `(' tokens.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type != CPP_CLOSE_PAREN)
	/* Parse the attribute-list.  */
	attribute_list = cp_parser_attribute_list (parser);
      else
	/* If the next token is a `)', then there is no attribute
	   list.  */
	attribute_list = NULL;

      /* Look for the two `)' tokens.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      /* Add these new attributes to the list.  */
      attributes = chainon (attributes, attribute_list);
    }

  return attributes;
}

/* Parse an attribute-list.

   attribute-list:
     attribute
     attribute-list , attribute

   attribute:
     identifier
     identifier ( identifier )
     identifier ( identifier , expression-list )
     identifier ( expression-list )

   Returns a TREE_LIST, or NULL_TREE on error.  Each node corresponds
   to an attribute.  The TREE_PURPOSE of each node is the identifier
   indicating which attribute is in use.  The TREE_VALUE represents
   the arguments, if any.  */

static tree
cp_parser_attribute_list (cp_parser* parser)
{
  tree attribute_list = NULL_TREE;
  bool save_translate_strings_p = parser->translate_strings_p;

  /* String arguments inside attributes are not translated to the
     execution character set.  */
  parser->translate_strings_p = false;
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree attribute;

      /* Look for the identifier.  We also allow keywords here; for
	 example `__attribute__ ((const))' is legal.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME
	  || token->type == CPP_KEYWORD)
	{
	  tree arguments = NULL_TREE;

	  /* Consume the token.  */
	  token = cp_lexer_consume_token (parser->lexer);

	  /* Save away the identifier that indicates which attribute
	     this is.  */
	  identifier = (token->type == CPP_KEYWORD)
	    /* For keywords, use the canonical spelling, not the
	       parsed identifier.  */
	    ? ridpointers[(int) token->keyword]
	    : token->u.value;

	  attribute = build_tree_list (identifier, NULL_TREE);

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If it's an `(', then parse the attribute arguments.  */
	  if (token->type == CPP_OPEN_PAREN)
	    {
	      VEC(tree,gc) *vec;
	      int attr_flag = (attribute_takes_identifier_p (identifier)
			       ? id_attr : normal_attr);
	      vec = cp_parser_parenthesized_expression_list
		    (parser, attr_flag, /*cast_p=*/false,
		     /*allow_expansion_p=*/false,
		     /*non_constant_p=*/NULL);
	      if (vec == NULL)
		arguments = error_mark_node;
	      else
		{
		  arguments = build_tree_list_vec (vec);
		  release_tree_vector (vec);
		}
	      /* Save the arguments away.  */
	      TREE_VALUE (attribute) = arguments;
	    }

	  if (arguments != error_mark_node)
	    {
	      /* Add this attribute to the list.  */
	      TREE_CHAIN (attribute) = attribute_list;
	      attribute_list = attribute;
	    }

	  token = cp_lexer_peek_token (parser->lexer);
	}
      /* Now, look for more attributes.  If the next token isn't a
	 `,', we're done.  */
      if (token->type != CPP_COMMA)
	break;

      /* Consume the comma and keep going.  */
      cp_lexer_consume_token (parser->lexer);
    }
  parser->translate_strings_p = save_translate_strings_p;

  /* We built up the list in reverse order.  */
  return nreverse (attribute_list);
}

/* Parse an optional `__extension__' keyword.  Returns TRUE if it is
   present, and FALSE otherwise.  *SAVED_PEDANTIC is set to the
   current value of the PEDANTIC flag, regardless of whether or not
   the `__extension__' keyword is present.  The caller is responsible
   for restoring the value of the PEDANTIC flag.  */

static bool
cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic)
{
  /* Save the old value of the PEDANTIC flag.  */
  *saved_pedantic = pedantic;

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION))
    {
      /* Consume the `__extension__' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* We're not being pedantic while the `__extension__' keyword is
	 in effect.  */
      pedantic = 0;

      return true;
    }

  return false;
}

/* Parse a label declaration.

   label-declaration:
     __label__ label-declarator-seq ;

   label-declarator-seq:
     identifier , label-declarator-seq
     identifier  */

static void
cp_parser_label_declaration (cp_parser* parser)
{
  /* Look for the `__label__' keyword.  */
  cp_parser_require_keyword (parser, RID_LABEL, RT_LABEL);

  while (true)
    {
      tree identifier;

      /* Look for an identifier.  */
      identifier = cp_parser_identifier (parser);
      /* If we failed, stop.  */
      if (identifier == error_mark_node)
	break;
      /* Declare it as a label.  */
      finish_label_decl (identifier);
      /* If the next token is a `;', stop.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	break;
      /* Look for the `,' separating the label declarations.  */
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}

/* Support Functions */

/* Looks up NAME in the current scope, as given by PARSER->SCOPE.
   NAME should have one of the representations used for an
   id-expression.  If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE
   is returned.  If PARSER->SCOPE is a dependent type, then a
   SCOPE_REF is returned.

   If NAME is a TEMPLATE_ID_EXPR, then it will be immediately
   returned; the name was already resolved when the TEMPLATE_ID_EXPR
   was formed.  Abstractly, such entities should not be passed to this
   function, because they do not need to be looked up, but it is
   simpler to check for this special case here, rather than at the
   call-sites.

   In cases not explicitly covered above, this function returns a DECL,
   OVERLOAD, or baselink representing the result of the lookup.  If there
   was no entity with the indicated NAME, the ERROR_MARK_NODE is
   returned.

   If TAG_TYPE is not NONE_TYPE, it indicates an explicit type keyword
   (e.g., "struct") that was used.  In that case bindings that do not
   refer to types are ignored.

   If IS_TEMPLATE is TRUE, bindings that do not refer to templates are
   ignored.

   If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces
   are ignored.

   If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent
   types.

   If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a
   TREE_LIST of candidates if name-lookup results in an ambiguity, and
   NULL_TREE otherwise.
 */

static tree
cp_parser_lookup_name (cp_parser *parser, tree name,
		       enum tag_types tag_type,
		       bool is_template,
		       bool is_namespace,
		       bool check_dependency,
		       tree *ambiguous_decls,
		       location_t name_location)
{
  int flags = 0;
  tree decl;
  tree object_type = parser->context->object_type;

  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    flags |= LOOKUP_COMPLAIN;

  /* Assume that the lookup will be unambiguous.  */
  if (ambiguous_decls)
    *ambiguous_decls = NULL_TREE;

  /* Now that we have looked up the name, the OBJECT_TYPE (if any) is
     no longer valid.  Note that if we are parsing tentatively, and
     the parse fails, OBJECT_TYPE will be automatically restored.  */
  parser->context->object_type = NULL_TREE;

  if (name == error_mark_node)
    return error_mark_node;

  /* A template-id has already been resolved; there is no lookup to
     do.  */
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    return name;
  if (BASELINK_P (name))
    {
      gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name))
		  == TEMPLATE_ID_EXPR);
      return name;
    }

  /* A BIT_NOT_EXPR is used to represent a destructor.  By this point,
     it should already have been checked to make sure that the name
     used matches the type being destroyed.  */
  if (TREE_CODE (name) == BIT_NOT_EXPR)
    {
      tree type;

      /* Figure out to which type this destructor applies.  */
      if (parser->scope)
	type = parser->scope;
      else if (object_type)
	type = object_type;
      else
	type = current_class_type;
      /* If that's not a class type, there is no destructor.  */
      if (!type || !CLASS_TYPE_P (type))
	return error_mark_node;
      if (CLASSTYPE_LAZY_DESTRUCTOR (type))
	lazily_declare_fn (sfk_destructor, type);
      if (!CLASSTYPE_DESTRUCTORS (type))
	  return error_mark_node;
      /* If it was a class type, return the destructor.  */
      return CLASSTYPE_DESTRUCTORS (type);
    }

  /* By this point, the NAME should be an ordinary identifier.  If
     the id-expression was a qualified name, the qualifying scope is
     stored in PARSER->SCOPE at this point.  */
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);

  /* Perform the lookup.  */
  if (parser->scope)
    {
      bool dependent_p;

      if (parser->scope == error_mark_node)
	return error_mark_node;

      /* If the SCOPE is dependent, the lookup must be deferred until
	 the template is instantiated -- unless we are explicitly
	 looking up names in uninstantiated templates.  Even then, we
	 cannot look up the name if the scope is not a class type; it
	 might, for example, be a template type parameter.  */
      dependent_p = (TYPE_P (parser->scope)
		     && dependent_scope_p (parser->scope));
      if ((check_dependency || !CLASS_TYPE_P (parser->scope))
	  && dependent_p)
	/* Defer lookup.  */
	decl = error_mark_node;
      else
	{
	  tree pushed_scope = NULL_TREE;

	  /* If PARSER->SCOPE is a dependent type, then it must be a
	     class type, and we must not be checking dependencies;
	     otherwise, we would have processed this lookup above.  So
	     that PARSER->SCOPE is not considered a dependent base by
	     lookup_member, we must enter the scope here.  */
	  if (dependent_p)
	    pushed_scope = push_scope (parser->scope);

	  /* If the PARSER->SCOPE is a template specialization, it
	     may be instantiated during name lookup.  In that case,
	     errors may be issued.  Even if we rollback the current
	     tentative parse, those errors are valid.  */
	  decl = lookup_qualified_name (parser->scope, name,
					tag_type != none_type,
					/*complain=*/true);

	  /* 3.4.3.1: In a lookup in which the constructor is an
	     acceptable lookup result and the nested-name-specifier
	     nominates a class C:

	     * if the name specified after the
	     nested-name-specifier, when looked up in C, is the
	     injected-class-name of C (Clause 9), or

	     * if the name specified after the nested-name-specifier
	     is the same as the identifier or the
	     simple-template-id's template-name in the last
	     component of the nested-name-specifier,

	     the name is instead considered to name the constructor
	     of class C.

	     [ Note: for example, the constructor is not an
	     acceptable lookup result in an elaborated-type-specifier
	     so the constructor would not be used in place of the
	     injected-class-name. --end note ]

	     Such a constructor name shall be used only in the
	     declarator-id of a declaration that names a constructor
	     or in a using-declaration.  */
	  if (tag_type == none_type
	      && DECL_SELF_REFERENCE_P (decl)
	      && same_type_p (DECL_CONTEXT (decl), parser->scope))
	    decl = lookup_qualified_name (parser->scope, ctor_identifier,
					  tag_type != none_type,
					  /*complain=*/true);

	  /* If we have a single function from a using decl, pull it out.  */
	  if (TREE_CODE (decl) == OVERLOAD
	      && !really_overloaded_fn (decl))
	    decl = OVL_FUNCTION (decl);

	  if (pushed_scope)
	    pop_scope (pushed_scope);
	}

      /* If the scope is a dependent type and either we deferred lookup or
	 we did lookup but didn't find the name, remember the name.  */
      if (decl == error_mark_node && TYPE_P (parser->scope)
	  && dependent_type_p (parser->scope))
	{
	  if (tag_type)
	    {
	      tree type;

	      /* The resolution to Core Issue 180 says that `struct
		 A::B' should be considered a type-name, even if `A'
		 is dependent.  */
	      type = make_typename_type (parser->scope, name, tag_type,
					 /*complain=*/tf_error);
	      decl = TYPE_NAME (type);
	    }
	  else if (is_template
		   && (cp_parser_next_token_ends_template_argument_p (parser)
		       || cp_lexer_next_token_is (parser->lexer,
						  CPP_CLOSE_PAREN)))
	    decl = make_unbound_class_template (parser->scope,
						name, NULL_TREE,
						/*complain=*/tf_error);
	  else
	    decl = build_qualified_name (/*type=*/NULL_TREE,
					 parser->scope, name,
					 is_template);
	}
      parser->qualifying_scope = parser->scope;
      parser->object_scope = NULL_TREE;
    }
  else if (object_type)
    {
      tree object_decl = NULL_TREE;
      /* Look up the name in the scope of the OBJECT_TYPE, unless the
	 OBJECT_TYPE is not a class.  */
      if (CLASS_TYPE_P (object_type))
	/* If the OBJECT_TYPE is a template specialization, it may
	   be instantiated during name lookup.  In that case, errors
	   may be issued.  Even if we rollback the current tentative
	   parse, those errors are valid.  */
	object_decl = lookup_member (object_type,
				     name,
				     /*protect=*/0,
				     tag_type != none_type,
				     tf_warning_or_error);
      /* Look it up in the enclosing context, too.  The member lookup,
	 when it succeeded, takes precedence below.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->object_scope = object_type;
      parser->qualifying_scope = NULL_TREE;
      if (object_decl)
	decl = object_decl;
    }
  else
    {
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* If the lookup failed, let our caller know.  */
  if (!decl || decl == error_mark_node)
    return error_mark_node;

  /* Pull out the template from an injected-class-name (or multiple).  */
  if (is_template)
    decl = maybe_get_template_decl_from_type_decl (decl);

  /* If it's a TREE_LIST, the result of the lookup was ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      if (ambiguous_decls)
	*ambiguous_decls = decl;
      /* The error message we have to print is too complicated for
	 cp_parser_error, so we incorporate its actions directly.  */
      if (!cp_parser_simulate_error (parser))
	{
	  error_at (name_location, "reference to %qD is ambiguous",
		    name);
	  print_candidates (decl);
	}
      return error_mark_node;
    }

  gcc_assert (DECL_P (decl)
	      || TREE_CODE (decl) == OVERLOAD
	      || TREE_CODE (decl) == SCOPE_REF
	      || TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
	      || BASELINK_P (decl));

  /* If we have resolved the name of a member declaration, check to
     see if the declaration is accessible.  When the name resolves to
     set of overloaded functions, accessibility is checked when
     overload resolution is done.

     During an explicit instantiation, access is not checked at all,
     as per [temp.explicit].  */
  if (DECL_P (decl))
    check_accessibility_of_qualified_id (decl, object_type, parser->scope);

  maybe_record_typedef_use (decl);

  return decl;
}

/* Like cp_parser_lookup_name, but for use in the typical case where
   CHECK_ACCESS is TRUE, IS_TYPE is FALSE, IS_TEMPLATE is FALSE,
   IS_NAMESPACE is FALSE, and CHECK_DEPENDENCY is TRUE.
*/ static tree cp_parser_lookup_name_simple (cp_parser* parser, tree name, location_t location) { return cp_parser_lookup_name (parser, name, none_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, location); } /* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in the current context, return the TYPE_DECL. If TAG_NAME_P is true, the DECL indicates the class being defined in a class-head, or declared in an elaborated-type-specifier. Otherwise, return DECL. */ static tree cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p) { /* If the TEMPLATE_DECL is being declared as part of a class-head, the translation from TEMPLATE_DECL to TYPE_DECL occurs: struct A { template <typename T> struct B; }; template <typename T> struct A::B {}; Similarly, in an elaborated-type-specifier: namespace N { struct X{}; } struct A { template <typename T> friend struct N::X; }; However, if the DECL refers to a class type, and we are in the scope of the class, then the name lookup automatically finds the TYPE_DECL created by build_self_reference rather than a TEMPLATE_DECL. For example, in: template <class T> struct S { S s; }; there is no need to handle such case. */ if (DECL_CLASS_TEMPLATE_P (decl) && tag_name_p) return DECL_TEMPLATE_RESULT (decl); return decl; } /* If too many, or too few, template-parameter lists apply to the declarator, issue an error message. Returns TRUE if all went well, and FALSE otherwise. */ static bool cp_parser_check_declarator_template_parameters (cp_parser* parser, cp_declarator *declarator, location_t declarator_location) { unsigned num_templates; /* We haven't seen any classes that involve template parameters yet. 
*/ num_templates = 0; switch (declarator->kind) { case cdk_id: if (declarator->u.id.qualifying_scope) { tree scope; scope = declarator->u.id.qualifying_scope; while (scope && CLASS_TYPE_P (scope)) { /* You're supposed to have one `template <...>' for every template class, but you don't need one for a full specialization. For example: template <class T> struct S{}; template <> struct S<int> { void f(); }; void S<int>::f () {} is correct; there shouldn't be a `template <>' for the definition of `S<int>::f'. */ if (!CLASSTYPE_TEMPLATE_INFO (scope)) /* If SCOPE does not have template information of any kind, then it is not a template, nor is it nested within a template. */ break; if (explicit_class_specialization_p (scope)) break; if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))) ++num_templates; scope = TYPE_CONTEXT (scope); } } else if (TREE_CODE (declarator->u.id.unqualified_name) == TEMPLATE_ID_EXPR) /* If the DECLARATOR has the form `X<y>' then it uses one additional level of template parameters. */ ++num_templates; return cp_parser_check_template_parameters (parser, num_templates, declarator_location, declarator); case cdk_function: case cdk_array: case cdk_pointer: case cdk_reference: case cdk_ptrmem: return (cp_parser_check_declarator_template_parameters (parser, declarator->declarator, declarator_location)); case cdk_error: return true; default: gcc_unreachable (); } return false; } /* NUM_TEMPLATES were used in the current declaration. If that is invalid, return FALSE and issue an error messages. Otherwise, return TRUE. If DECLARATOR is non-NULL, then we are checking a declarator and we can print more accurate diagnostics. */ static bool cp_parser_check_template_parameters (cp_parser* parser, unsigned num_templates, location_t location, cp_declarator *declarator) { /* If there are the same number of template classes and parameter lists, that's OK. 
*/
  if (parser->num_template_parameter_lists == num_templates)
    return true;

  /* If there are more, but only one more, then we are referring to a
     member template.  That's OK too.  */
  if (parser->num_template_parameter_lists == num_templates + 1)
    return true;

  /* If there are more template classes than parameter lists, we have
     something like:

       template <class T> void S<T>::R<T>::f ();  */
  if (parser->num_template_parameter_lists < num_templates)
    {
      /* Pick the most specific diagnostic available: at namespace
	 scope with a declarator we can point at the member being
	 specialized; with a declarator inside a function we only know
	 the declaration is invalid; otherwise fall back to a generic
	 message.  */
      if (declarator && !current_function_decl)
	error_at (location, "specializing member %<%T::%E%> "
		  "requires %<template<>%> syntax",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else if (declarator)
	error_at (location, "invalid declaration of %<%T::%E%>",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else
	error_at (location, "too few template-parameter-lists");
      return false;
    }
  /* Otherwise, there are too many template parameter lists.  We have
     something like:

     template <class T> template <class U> void S::f();  */
  error_at (location, "too many template-parameter-lists");
  return false;
}

/* Parse an optional `::' token indicating that the following name is
   from the global namespace.  If so, PARSER->SCOPE is set to the
   GLOBAL_NAMESPACE. Otherwise, PARSER->SCOPE is set to NULL_TREE,
   unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left alone.

   Returns the new value of PARSER->SCOPE, if the `::' token is
   present, and NULL_TREE otherwise.  */

static tree
cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If we're looking at a `::' token then we're starting from the
     global namespace, not our current location.  */
  if (token->type == CPP_SCOPE)
    {
      /* Consume the `::' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Set the SCOPE so that we know where to start the lookup.
*/
      parser->scope = global_namespace;
      parser->qualifying_scope = global_namespace;
      parser->object_scope = NULL_TREE;

      return parser->scope;
    }
  else if (!current_scope_valid_p)
    {
      /* Forget any previous qualification; all three scope fields are
	 reset together.  */
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* No `::' token was present.  */
  return NULL_TREE;
}

/* Returns TRUE if the upcoming token sequence is the start of a
   constructor declarator.  If FRIEND_P is true, the declarator is
   preceded by the `friend' specifier.  */

static bool
cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p)
{
  bool constructor_p;
  tree nested_name_specifier;
  cp_token *next_token;

  /* The common case is that this is not a constructor declarator, so
     try to avoid doing lots of work if at all possible.  It's not
     valid declare a constructor at function scope.  */
  if (parser->in_function_body)
    return false;
  /* And only certain tokens can begin a constructor declarator.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type != CPP_NAME
      && next_token->type != CPP_SCOPE
      && next_token->type != CPP_NESTED_NAME_SPECIFIER
      && next_token->type != CPP_TEMPLATE_ID)
    return false;
  /* Parse tentatively; we are going to roll back all of the tokens
     consumed here.  */
  cp_parser_parse_tentatively (parser);
  /* Assume that we are looking at a constructor declarator.  */
  constructor_p = true;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  nested_name_specifier
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/false,
					    /*type_p=*/false,
					    /*is_declaration=*/false));
  /* Outside of a class-specifier, there must be a
     nested-name-specifier.
*/
  if (!nested_name_specifier
      && (!at_class_scope_p ()
	  || !TYPE_BEING_DEFINED (current_class_type)
	  || friend_p))
    constructor_p = false;
  /* A malformed nested-name-specifier cannot begin a constructor
     declarator either.  */
  else if (nested_name_specifier == error_mark_node)
    constructor_p = false;

  /* If we have a class scope, this is easy; DR 147 says that S::S
     always names the constructor, and no other qualified name could.  */
  if (constructor_p && nested_name_specifier
      && CLASS_TYPE_P (nested_name_specifier))
    {
      tree id = cp_parser_unqualified_id (parser,
					  /*template_keyword_p=*/false,
					  /*check_dependency_p=*/false,
					  /*declarator_p=*/true,
					  /*optional_p=*/false);
      /* For an overload set, compare using the name of its first
	 member.  */
      if (is_overloaded_fn (id))
	id = DECL_NAME (get_first_fn (id));
      if (!constructor_name_p (id, nested_name_specifier))
	constructor_p = false;
    }
  /* If we still think that this might be a constructor-declarator,
     look for a class-name.  */
  else if (constructor_p)
    {
      /* If we have:

	   template <typename T> struct S {
	     S();
	   };

	 we must recognize that the nested `S' names a class.  */
      tree type_decl;
      type_decl = cp_parser_class_name (parser,
					/*typename_keyword_p=*/false,
					/*template_keyword_p=*/false,
					none_type,
					/*check_dependency_p=*/false,
					/*class_head_p=*/false,
					/*is_declaration=*/false);
      /* If there was no class-name, then this is not a constructor.  */
      constructor_p = !cp_parser_error_occurred (parser);

      /* If we're still considering a constructor, we have to see a `(',
	 to begin the parameter-declaration-clause, followed by either a
	 `)', an `...', or a decl-specifier.  We need to check for a
	 type-specifier to avoid being fooled into thinking that:

	   S (f) (int);

	 is a constructor.  (It is actually a function named `f' that
	 takes one parameter (of type `int') and returns a value of type
	 `S'.
*/
      if (constructor_p
	  && !cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	constructor_p = false;

      if (constructor_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS)
	  /* A parameter declaration begins with a decl-specifier,
	     which is either the "attribute" keyword, a storage class
	     specifier, or (usually) a type-specifier.  */
	  && !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer))
	{
	  tree type;
	  tree pushed_scope = NULL_TREE;
	  unsigned saved_num_template_parameter_lists;

	  /* Names appearing in the type-specifier should be looked up
	     in the scope of the class.  */
	  if (current_class_type)
	    type = NULL_TREE;
	  else
	    {
	      type = TREE_TYPE (type_decl);
	      if (TREE_CODE (type) == TYPENAME_TYPE)
		{
		  type = resolve_typename_type (type,
						/*only_current_p=*/false);
		  /* If the TYPENAME_TYPE could not be resolved, give
		     up on the tentative parse entirely.  */
		  if (TREE_CODE (type) == TYPENAME_TYPE)
		    {
		      cp_parser_abort_tentative_parse (parser);
		      return false;
		    }
		}
	      pushed_scope = push_scope (type);
	    }

	  /* Inside the constructor parameter list, surrounding
	     template-parameter-lists do not apply.  */
	  saved_num_template_parameter_lists
	    = parser->num_template_parameter_lists;
	  parser->num_template_parameter_lists = 0;

	  /* Look for the type-specifier.  */
	  cp_parser_type_specifier (parser,
				    CP_PARSER_FLAGS_NONE,
				    /*decl_specs=*/NULL,
				    /*is_declarator=*/true,
				    /*declares_class_or_enum=*/NULL,
				    /*is_cv_qualifier=*/NULL);

	  parser->num_template_parameter_lists
	    = saved_num_template_parameter_lists;

	  /* Leave the scope of the class.  */
	  if (pushed_scope)
	    pop_scope (pushed_scope);

	  /* A type-specifier here means this was NOT a constructor.  */
	  constructor_p = !cp_parser_error_occurred (parser);
	}
    }

  /* We did not really want to consume any tokens.  */
  cp_parser_abort_tentative_parse (parser);

  return constructor_p;
}

/* Parse the definition of the function given by the DECL_SPECIFIERS,
   ATTRIBUTES, and DECLARATOR.  The access checks have been deferred;
   they must be performed once we are in the scope of the function.

   Returns the function defined.
*/
static tree
cp_parser_function_definition_from_specifiers_and_declarator
  (cp_parser* parser,
   cp_decl_specifier_seq *decl_specifiers,
   tree attributes,
   const cp_declarator *declarator)
{
  tree fn;
  bool success_p;

  /* Begin the function-definition.  */
  success_p = start_function (decl_specifiers, declarator, attributes);

  /* The things we're about to see are not directly qualified by any
     template headers we've seen thus far.  */
  reset_specialization ();

  /* If there were names looked up in the decl-specifier-seq that we
     did not check, check them now.  We must wait until we are in the
     scope of the function to perform the checks, since the function
     might be a friend.  */
  perform_deferred_access_checks ();

  if (!success_p)
    {
      /* Skip the entire function.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = error_mark_node;
    }
  else if (DECL_INITIAL (current_function_decl) != error_mark_node)
    {
      /* Seen already, skip it.  An error message has already been
	 output.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = current_function_decl;
      current_function_decl = NULL_TREE;
      /* If this is a function from a class, pop the nested class.  */
      if (current_class_name)
	pop_nested_class ();
    }
  else
    {
      /* Account parsing time to the right timer: inline functions are
	 tracked separately from ordinary ones.  */
      timevar_id_t tv;
      if (DECL_DECLARED_INLINE_P (current_function_decl))
        tv = TV_PARSE_INLINE;
      else
        tv = TV_PARSE_FUNC;
      timevar_push (tv);
      fn = cp_parser_function_definition_after_declarator (parser,
							 /*inline_p=*/false);
      timevar_pop (tv);
    }

  return fn;
}

/* Parse the part of a function-definition that follows the
   declarator.  INLINE_P is TRUE iff this function is an inline
   function defined within a class-specifier.

   Returns the function defined.
*/
static tree
cp_parser_function_definition_after_declarator (cp_parser* parser,
						bool inline_p)
{
  tree fn;
  bool ctor_initializer_p = false;
  bool saved_in_unbraced_linkage_specification_p;
  bool saved_in_function_body;
  unsigned saved_num_template_parameter_lists;
  cp_token *token;

  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = true;
  /* If the next token is `return', then the code may be trying to
     make use of the "named return value" extension that G++ used to
     support.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN))
    {
      /* Consume the `return' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the identifier that indicates what value is to be
	 returned.  */
      cp_parser_identifier (parser);
      /* Issue an error message.  */
      error_at (token->location,
		"named return values are no longer supported");
      /* Skip tokens until we reach the start of the function body.  */
      while (true)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  /* Stop at the `{', but also at EOF or the end of a pragma so
	     that we cannot loop forever on malformed input.  */
	  if (token->type == CPP_OPEN_BRACE
	      || token->type == CPP_EOF
	      || token->type == CPP_PRAGMA_EOL)
	    break;
	  cp_lexer_consume_token (parser->lexer);
	}
    }
  /* The `extern' in `extern "C" void f () { ... }' does not apply to
     anything declared inside `f'.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;
  /* Inside the function, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;

  start_lambda_scope (current_function_decl);

  /* If the next token is `try', `__transaction_atomic', or
     `__transaction_relaxed`, then we are looking at either function-try-block
     or function-transaction-block.  Note that all of these include the
     function-body.
*/
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_ATOMIC))
    ctor_initializer_p = cp_parser_function_transaction (parser,
	RID_TRANSACTION_ATOMIC);
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
      RID_TRANSACTION_RELAXED))
    ctor_initializer_p = cp_parser_function_transaction (parser,
	RID_TRANSACTION_RELAXED);
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  finish_lambda_scope ();

  /* Finish the function.  CTOR_INITIALIZER_P is encoded in bit 0 of
     the flags argument, INLINE_P in bit 1.  */
  fn = finish_function ((ctor_initializer_p ? 1 : 0)
			| (inline_p ? 2 : 0));
  /* Generate code for it, if necessary.  */
  expand_or_defer_fn (fn);
  /* Restore the saved values.  */
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_function_body = saved_in_function_body;

  return fn;
}

/* Parse a template-declaration, assuming that the `export' (and
   `extern') keywords, if present, has already been scanned.  MEMBER_P
   is as for cp_parser_template_declaration.  */

static void
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
  tree decl = NULL_TREE;
  VEC (deferred_access_check,gc) *checks;
  tree parameter_list;
  bool friend_p = false;
  bool need_lang_pop;
  cp_token *token;

  /* Look for the `template' keyword.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE))
    return;

  /* And the `<'.  */
  if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
    return;
  if (at_class_scope_p () && current_function_decl)
    {
      /* 14.5.2.2 [temp.mem]

	 A local class shall not have member templates.  */
      error_at (token->location,
		"invalid declaration of member template in local class");
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  /* [temp]

     A template ... shall not have C linkage.
*/
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;

  /* We cannot perform access checks on the template parameter
     declarations until we know what is being declared, just as we
     cannot check the decl-specifier list.  */
  push_deferring_access_checks (dk_deferred);

  /* If the next token is `>', then we have an invalid
     specialization.  Rather than complain about an invalid template
     parameter, issue an error message here.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
    {
      cp_parser_error (parser, "invalid explicit specialization");
      begin_specialization ();
      parameter_list = NULL_TREE;
    }
  else
    {
      /* Parse the template parameters.  */
      parameter_list = cp_parser_template_parameter_list (parser);
    }

  /* Get the deferred access checks from the parameter list.  These
     will be checked once we know what is being declared, as for a
     member template the checks must be performed in the scope of the
     class containing the member.  */
  checks = get_deferred_access_checks ();

  /* Look for the `>'.  */
  cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* We just processed one more parameter list.  */
  ++parser->num_template_parameter_lists;
  /* If the next token is `template', there are more template
     parameters.  Handle `template <...> template <...> ...' by
     recursing.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer,
				      RID_TEMPLATE))
    cp_parser_template_declaration_after_export (parser, member_p);
  else if (cxx_dialect >= cxx0x
	   && cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    decl = cp_parser_alias_declaration (parser);
  else
    {
      /* There are no access checks when parsing a template, as we do
	 not know if a specialization will be a friend.
*/
      push_deferring_access_checks (dk_no_check);
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_single_declaration (parser,
					   checks,
					   member_p,
					   /*explicit_specialization_p=*/false,
					   &friend_p);
      pop_deferring_access_checks ();

      /* If this is a member template declaration, let the front
	 end know.  */
      if (member_p && !friend_p && decl)
	{
	  if (TREE_CODE (decl) == TYPE_DECL)
	    cp_parser_check_access_in_redeclaration (decl, token->location);

	  decl = finish_member_template_decl (decl);
	}
      else if (friend_p && decl
	       && TREE_CODE (decl) == TYPE_DECL)
	make_friend_class (current_class_type, TREE_TYPE (decl),
			   /*complain=*/true);
    }
  /* We are done with the current parameter list.  */
  --parser->num_template_parameter_lists;

  pop_deferring_access_checks ();

  /* Finish up.  */
  finish_template_decl (parameter_list);

  /* Check the template arguments for a literal operator template.
     The only acceptable form here is a single non-type template
     parameter pack of type char, as the diagnostic below states.  */
  if (decl
      && (TREE_CODE (decl) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (decl))
      && UDLIT_OPER_P (DECL_NAME (decl)))
    {
      bool ok = true;
      if (parameter_list == NULL_TREE)
	ok = false;
      else
	{
	  int num_parms = TREE_VEC_LENGTH (parameter_list);
	  if (num_parms != 1)
	    ok = false;
	  else
	    {
	      tree parm_list = TREE_VEC_ELT (parameter_list, 0);
	      tree parm = INNERMOST_TEMPLATE_PARMS (parm_list);
	      if (TREE_TYPE (parm) != char_type_node
		  || !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
		ok = false;
	    }
	}
      if (!ok)
	error ("literal operator template %qD has invalid parameter list."
	       "  Expected non-type template argument pack <char...>",
	       decl);
    }
  /* Register member declarations.  */
  if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl))
    finish_member_declaration (decl);
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* If DECL is a function template, we must return to parse it later.
     (Even though there is no definition, there might be default
     arguments that need handling.)
*/
  if (member_p && decl
      && (TREE_CODE (decl) == FUNCTION_DECL
	  || DECL_FUNCTION_TEMPLATE_P (decl)))
    VEC_safe_push (tree, gc, unparsed_funs_with_definitions, decl);
}

/* Perform the deferred access checks from a template-parameter-list.
   CHECKS is a TREE_LIST of access checks, as returned by
   get_deferred_access_checks.  */

static void
cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)* checks)
{
  /* Run the checks with PROCESSING_TEMPLATE_PARMLIST temporarily
     incremented.  */
  ++processing_template_parmlist;
  perform_access_checks (checks);
  --processing_template_parmlist;
}

/* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or
   `function-definition' sequence.  MEMBER_P is true, this declaration
   appears in a class scope.

   Returns the DECL for the declared entity.  If FRIEND_P is non-NULL,
   *FRIEND_P is set to TRUE iff the declaration is a friend.  */

static tree
cp_parser_single_declaration (cp_parser* parser,
			      VEC (deferred_access_check,gc)* checks,
			      bool member_p,
			      bool explicit_specialization_p,
			      bool* friend_p)
{
  int declares_class_or_enum;
  tree decl = NULL_TREE;
  cp_decl_specifier_seq decl_specifiers;
  bool function_definition_p = false;
  cp_token *decl_spec_token_start;

  /* This function is only used when processing a template
     declaration.  */
  gcc_assert (innermost_scope_kind () == sk_template_parms
	      || innermost_scope_kind () == sk_template_spec);

  /* Defer access checks until we know what is being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Try the `decl-specifier-seq [opt] init-declarator [opt]'
     alternative.  */
  decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  if (friend_p)
    *friend_p = cp_parser_friend_p (&decl_specifiers);

  /* There are no template typedefs.  */
  if (decl_specifiers.specs[(int) ds_typedef])
    {
      error_at (decl_spec_token_start->location,
		"template declaration of %<typedef%>");
      decl = error_mark_node;
    }

  /* Gather up the access checks that occurred the
     decl-specifier-seq.
*/
  stop_deferring_access_checks ();

  /* Check for the declaration of a template class.  */
  if (declares_class_or_enum)
    {
      if (cp_parser_declares_only_class_p (parser))
	{
	  decl = shadow_tag (&decl_specifiers);

	  /* In this case:

	       struct C {
		 friend template <typename T> struct A<T>::B;
	       };

	     A<T>::B will be represented by a TYPENAME_TYPE, and
	     therefore not recognized by shadow_tag.  */
	  if (friend_p && *friend_p
	      && !decl
	      && decl_specifiers.type
	      && TYPE_P (decl_specifiers.type))
	    decl = decl_specifiers.type;

	  if (decl && decl != error_mark_node)
	    decl = TYPE_NAME (decl);
	  else
	    decl = error_mark_node;

	  /* Perform access checks for template parameters.  */
	  cp_parser_perform_template_parameter_access_checks (checks);
	}
    }

  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* cp_parser_parse_and_diagnose_invalid_type_name calls
	 cp_parser_skip_to_end_of_block_or_statement, so don't try to parse
	 the rest of this declaration.  */
      decl = error_mark_node;
      goto out;
    }

  /* If it's not a template class, try for a template function.  If
     the next token is a `;', then this declaration does not declare
     anything.  But, if there were errors in the decl-specifiers, then
     the error might well have come from an attempted class-specifier.
     In that case, there's no need to warn about a missing declarator.  */
  if (!decl
      && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
	  || decl_specifiers.type != error_mark_node))
    {
      decl = cp_parser_init_declarator (parser,
					&decl_specifiers,
					checks,
					/*function_definition_allowed_p=*/true,
					member_p,
					declares_class_or_enum,
					&function_definition_p,
					NULL);

    /* 7.1.1-1 [dcl.stc]

       A storage-class-specifier shall not be specified in an explicit
       specialization...
*/
      if (decl
	  && explicit_specialization_p
	  && decl_specifiers.storage_class != sc_none)
	{
	  error_at (decl_spec_token_start->location,
		    "explicit template specialization cannot have a storage class");
	  decl = error_mark_node;
	}
    }

  /* Look for a trailing `;' after the declaration.  */
  if (!function_definition_p
      && (decl == error_mark_node
	  || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON)))
    cp_parser_skip_to_end_of_block_or_statement (parser);

 out:
  pop_deferring_access_checks ();

  /* Clear any current qualification; whatever comes next is the start
     of something new.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  return decl;
}

/* Parse a cast-expression that is not the operand of a unary "&".  */

static tree
cp_parser_simple_cast_expression (cp_parser *parser)
{
  /* Neither the address-of nor the cast context applies here.  */
  return cp_parser_cast_expression (parser,
				    /*address_p=*/false, /*cast_p=*/false,
				    NULL);
}

/* Parse a functional cast to TYPE.  Returns an expression
   representing the cast.  */

static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
  VEC(tree,gc) *vec;
  tree expression_list;
  tree cast;
  bool nonconst_p;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* A braced-init-list argument, `T{...}': build the result via
	 finish_compound_literal instead of an expression list.  */
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &nonconst_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      if (TREE_CODE (type) == TYPE_DECL)
	type = TREE_TYPE (type);
      return finish_compound_literal (type, expression_list,
				      tf_warning_or_error);
    }

  vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						 /*cast_p=*/true,
						 /*allow_expansion_p=*/true,
						 /*non_constant_p=*/NULL);
  if (vec == NULL)
    expression_list = error_mark_node;
  else
    {
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }

  cast = build_functional_cast (type, expression_list,
				tf_warning_or_error);
  /* [expr.const]/1: In an integral constant expression "only type
     conversions to integral or enumeration type can be used".
*/
  if (TREE_CODE (type) == TYPE_DECL)
    type = TREE_TYPE (type);
  if (cast != error_mark_node
      && !cast_valid_in_integral_constant_expression_p (type)
      && cp_parser_non_integral_constant_expression (parser,
						     NIC_CONSTRUCTOR))
    return error_mark_node;
  return cast;
}

/* Save the tokens that make up the body of a member function defined
   in a class-specifier.  The DECL_SPECIFIERS and DECLARATOR have
   already been parsed.  The ATTRIBUTES are any GNU "__attribute__"
   specifiers applied to the declaration.  Returns the FUNCTION_DECL
   for the member function.  */

static tree
cp_parser_save_member_function_body (cp_parser* parser,
				     cp_decl_specifier_seq *decl_specifiers,
				     cp_declarator *declarator,
				     tree attributes)
{
  cp_token *first;
  cp_token *last;
  tree fn;

  /* Create the FUNCTION_DECL.  */
  fn = grokmethod (decl_specifiers, declarator, attributes);
  /* If something went badly wrong, bail out now.  */
  if (fn == error_mark_node)
    {
      /* If there's a function-body, skip it.  */
      if (cp_parser_token_starts_function_definition_p
	  (cp_lexer_peek_token (parser->lexer)))
	cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* Remember it, if there default args to post process.  */
  cp_parser_save_default_args (parser, fn);

  /* Save away the tokens that make up the body of the
     function.  */
  first = parser->lexer->next_token;
  /* We can have braced-init-list mem-initializers before the fn body.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_lexer_consume_token (parser->lexer);
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
	     && cp_lexer_next_token_is_not_keyword (parser->lexer, RID_TRY))
	{
	  /* cache_group will stop after an un-nested { } pair, too.  */
	  if (cp_parser_cache_group (parser, CPP_CLOSE_PAREN, /*depth=*/0))
	    break;

	  /* variadic mem-inits have ... after the ')'.
*/
	  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	    cp_lexer_consume_token (parser->lexer);
	}
    }
  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  /* Handle function try blocks.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH))
    cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  last = parser->lexer->next_token;

  /* Save away the inline definition; we will process it when the
     class is complete.  */
  DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last);
  DECL_PENDING_INLINE_P (fn) = 1;

  /* We need to know that this was defined in the class, so that
     friend templates are handled correctly.  */
  DECL_INITIALIZED_IN_CLASS_P (fn) = 1;

  /* Add FN to the queue of functions to be parsed later.  */
  VEC_safe_push (tree, gc, unparsed_funs_with_definitions, fn);

  return fn;
}

/* Save the tokens that make up the in-class initializer for a non-static
   data member.  Returns a DEFAULT_ARG.  */

static tree
cp_parser_save_nsdmi (cp_parser* parser)
{
  /* An NSDMI is cached with the same machinery as a default argument.  */
  return cp_parser_cache_defarg (parser, /*nsdmi=*/true);
}

/* Parse a template-argument-list, as well as the trailing ">" (but
   not the opening "<").  See cp_parser_template_argument_list for the
   return value.  */

static tree
cp_parser_enclosed_template_argument_list (cp_parser* parser)
{
  tree arguments;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  bool saved_greater_than_is_operator_p;
  int saved_unevaluated_operand;
  int saved_inhibit_evaluation_warnings;

  /* [temp.names]

     When parsing a template-id, the first non-nested `>' is taken as
     the end of the template-argument-list rather than a greater-than
     operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = false;
  /* Parsing the argument list may modify SCOPE, so we save it
     here.
*/
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof".  */
  saved_unevaluated_operand = cp_unevaluated_operand;
  cp_unevaluated_operand = 0;
  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  c_inhibit_evaluation_warnings = 0;
  /* Parse the template-argument-list itself.  An immediate `>' (or
     `>>') means an empty argument list.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER)
      || cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    arguments = NULL_TREE;
  else
    arguments = cp_parser_template_argument_list (parser);
  /* Look for the `>' that ends the template-argument-list. If we find
     a '>>' instead, it's probably just a typo.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      if (cxx_dialect != cxx98)
        {
          /* In C++0x, a `>>' in a template argument list or cast
             expression is considered to be two separate `>'
             tokens. So, change the current token to a `>', but don't
             consume it: it will be consumed later when the outer
             template argument list (or cast expression) is parsed.
             Note that this replacement of `>' for `>>' is necessary
             even if we are parsing tentatively: in the tentative
             case, after calling
             cp_parser_enclosed_template_argument_list we will always
             throw away all of the template arguments and the first
             closing `>', either because the template argument list
             was erroneous or because we are replacing those tokens
             with a CPP_TEMPLATE_ID token.  The second `>' (which will
             not have been thrown away) is needed either to close an
             outer template argument list or to complete a new-style
             cast.  */
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
          token->type = CPP_GREATER;
        }
      else if (!saved_greater_than_is_operator_p)
	{
	  /* If we're in a nested template argument list, the '>>' has
	    to be a typo for '> >'.
We emit the error message, but we
	    continue parsing and we push a '>' as next token, so that
	    the argument list will be parsed correctly.  Note that the
	    global source location is still on the token before the
	    '>>', so we need to say explicitly where we want it.  */
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  error_at (token->location, "%<>>%> should be %<> >%> "
		    "within a nested template argument list");

	  token->type = CPP_GREATER;
	}
      else
	{
	  /* If this is not a nested template argument list, the '>>'
	    is a typo for '>'. Emit an error message and continue.
	    Same deal about the token location, but here we can get it
	    right by consuming the '>>' before issuing the diagnostic.  */
	  cp_token *token = cp_lexer_consume_token (parser->lexer);
	  error_at (token->location,
		    "spurious %<>>%>, use %<>%> to terminate "
		    "a template argument list");
	}
    }
  else
    cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* The `>' token might be a greater-than operator again now.  */
  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;
  /* Restore the SAVED_SCOPE.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

  return arguments;
}

/* MEMBER_FUNCTION is a member function, or a friend.  If default
   arguments, or the body of the function have not yet been parsed,
   parse them now.  */

static void
cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function)
{
  timevar_push (TV_PARSE_INMETH);
  /* If this member is a template, get the underlying
     FUNCTION_DECL.  */
  if (DECL_FUNCTION_TEMPLATE_P (member_function))
    member_function = DECL_TEMPLATE_RESULT (member_function);

  /* There should not be any class definitions in progress at this
     point; the bodies of members are only parsed outside of all class
     definitions.
*/
  gcc_assert (parser->num_classes_being_defined == 0);
  /* While we're parsing the member functions we might encounter more
     classes.  We want to handle them right away, but we don't want
     them getting mixed up with functions that are currently in the
     queue.  */
  push_unparsed_function_queues (parser);

  /* Make sure that any template parameters are in scope.  */
  maybe_begin_member_template_processing (member_function);

  /* If the body of the function has not yet been parsed, parse it
     now.  */
  if (DECL_PENDING_INLINE_P (member_function))
    {
      tree function_scope;
      cp_token_cache *tokens;

      /* The function is no longer pending; we are processing it.  */
      tokens = DECL_PENDING_INLINE_INFO (member_function);
      DECL_PENDING_INLINE_INFO (member_function) = NULL;
      DECL_PENDING_INLINE_P (member_function) = 0;

      /* If this is a local class, enter the scope of the containing
	 function.  */
      function_scope = current_function_decl;
      if (function_scope)
	push_function_context ();

      /* Push the body of the function onto the lexer stack.  */
      cp_parser_push_lexer_for_tokens (parser, tokens);

      /* Let the front end know that we going to be defining this
	 function.  */
      start_preparsed_function (member_function, NULL_TREE,
				SF_PRE_PARSED | SF_INCLASS_INLINE);

      /* Don't do access checking if it is a templated function.  */
      if (processing_template_decl)
	push_deferring_access_checks (dk_no_check);

      /* Now, parse the body of the function.  */
      cp_parser_function_definition_after_declarator (parser,
						      /*inline_p=*/true);

      if (processing_template_decl)
	pop_deferring_access_checks ();

      /* Leave the scope of the containing function.  */
      if (function_scope)
	pop_function_context ();
      cp_parser_pop_lexer (parser);
    }

  /* Remove any template parameters from the symbol table.  */
  maybe_end_member_template_processing ();

  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
  timevar_pop (TV_PARSE_INMETH);
}

/* If DECL contains any default args, remember it on the unparsed
   functions queue.
*/
static void
cp_parser_save_default_args (cp_parser* parser, tree decl)
{
  tree probe;

  /* A non-NULL TREE_PURPOSE on a parameter-type node is a default
     argument; one queue entry per function suffices, hence the
     break after the first hit.  */
  for (probe = TYPE_ARG_TYPES (TREE_TYPE (decl));
       probe;
       probe = TREE_CHAIN (probe))
    if (TREE_PURPOSE (probe))
      {
	cp_default_arg_entry *entry
	  = VEC_safe_push (cp_default_arg_entry, gc,
			   unparsed_funs_with_default_args, NULL);
	entry->class_type = current_class_type;
	entry->decl = decl;
	break;
      }
}

/* DEFAULT_ARG contains the saved tokens for the initializer of DECL,
   which is either a FIELD_DECL or PARM_DECL.

   Parse it and return the result.  For a PARM_DECL, PARMTYPE is the
   corresponding type from the parameter-type-list.  */

static tree
cp_parser_late_parse_one_default_arg (cp_parser *parser, tree decl,
				      tree default_arg, tree parmtype)
{
  cp_token_cache *tokens;
  tree parsed_arg;
  bool dummy;

  if (default_arg == error_mark_node)
    return error_mark_node;

  /* Push the saved tokens for the default argument onto the parser's
     lexer stack.  */
  tokens = DEFARG_TOKENS (default_arg);
  cp_parser_push_lexer_for_tokens (parser, tokens);

  start_lambda_scope (decl);

  /* Parse the default argument.  The two out-parameters of
     cp_parser_initializer are not needed here, so both are fed the
     same dummy.  */
  parsed_arg = cp_parser_initializer (parser, &dummy, &dummy);
  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

  finish_lambda_scope ();

  if (!processing_template_decl)
    {
      /* In a non-template class, check conversions now.  In a template,
	 we'll wait and instantiate these as needed.  */
      if (TREE_CODE (decl) == PARM_DECL)
	parsed_arg = check_default_argument (parmtype, parsed_arg);
      else
	{
	  int flags = LOOKUP_IMPLICIT;
	  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg)
	      && CONSTRUCTOR_IS_DIRECT_INIT (parsed_arg))
	    flags = LOOKUP_NORMAL;
	  parsed_arg = digest_init_flags (TREE_TYPE (decl), parsed_arg, flags);
	}
    }

  /* If the token stream has not been completely used up, then there
     was extra junk after the end of the default argument.
*/ if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF)) { if (TREE_CODE (decl) == PARM_DECL) cp_parser_error (parser, "expected %<,%>"); else cp_parser_error (parser, "expected %<;%>"); } /* Revert to the main lexer. */ cp_parser_pop_lexer (parser); return parsed_arg; } /* FIELD is a non-static data member with an initializer which we saved for later; parse it now. */ static void cp_parser_late_parsing_nsdmi (cp_parser *parser, tree field) { tree def; push_unparsed_function_queues (parser); def = cp_parser_late_parse_one_default_arg (parser, field, DECL_INITIAL (field), NULL_TREE); pop_unparsed_function_queues (parser); DECL_INITIAL (field) = def; } /* FN is a FUNCTION_DECL which may contains a parameter with an unparsed DEFAULT_ARG. Parse the default args now. This function assumes that the current scope is the scope in which the default argument should be processed. */ static void cp_parser_late_parsing_default_args (cp_parser *parser, tree fn) { bool saved_local_variables_forbidden_p; tree parm, parmdecl; /* While we're parsing the default args, we might (due to the statement expression extension) encounter more classes. We want to handle them right away, but we don't want them getting mixed up with default args that are currently in the queue. */ push_unparsed_function_queues (parser); /* Local variable names (and the `this' keyword) may not appear in a default argument. */ saved_local_variables_forbidden_p = parser->local_variables_forbidden_p; parser->local_variables_forbidden_p = true; push_defarg_context (fn); for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn)), parmdecl = DECL_ARGUMENTS (fn); parm && parm != void_list_node; parm = TREE_CHAIN (parm), parmdecl = DECL_CHAIN (parmdecl)) { tree default_arg = TREE_PURPOSE (parm); tree parsed_arg; VEC(tree,gc) *insts; tree copy; unsigned ix; if (!default_arg) continue; if (TREE_CODE (default_arg) != DEFAULT_ARG) /* This can happen for a friend declaration for a function already declared with default arguments. 
*/ continue; parsed_arg = cp_parser_late_parse_one_default_arg (parser, parmdecl, default_arg, TREE_VALUE (parm)); if (parsed_arg == error_mark_node) { continue; } TREE_PURPOSE (parm) = parsed_arg; /* Update any instantiations we've already created. */ for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0; VEC_iterate (tree, insts, ix, copy); ix++) TREE_PURPOSE (copy) = parsed_arg; } pop_defarg_context (); /* Make sure no default arg is missing. */ check_default_args (fn); /* Restore the state of local_variables_forbidden_p. */ parser->local_variables_forbidden_p = saved_local_variables_forbidden_p; /* Restore the queue. */ pop_unparsed_function_queues (parser); } /* Parse the operand of `sizeof' (or a similar operator). Returns either a TYPE or an expression, depending on the form of the input. The KEYWORD indicates which kind of expression we have encountered. */ static tree cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword) { tree expr = NULL_TREE; const char *saved_message; char *tmp; bool saved_integral_constant_expression_p; bool saved_non_integral_constant_expression_p; bool pack_expansion_p = false; /* Types cannot be defined in a `sizeof' expression. Save away the old message. */ saved_message = parser->type_definition_forbidden_message; /* And create the new one. */ tmp = concat ("types may not be defined in %<", IDENTIFIER_POINTER (ridpointers[keyword]), "%> expressions", NULL); parser->type_definition_forbidden_message = tmp; /* The restrictions on constant-expressions do not apply inside sizeof expressions. */ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; /* If it's a `...', then we are computing the length of a parameter pack. */ if (keyword == RID_SIZEOF && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...'. 
*/
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();

      /* Note that this is an expansion.  */
      pack_expansion_p = true;
    }

  /* Do not actually evaluate the expression.  */
  ++cp_unevaluated_operand;
  ++c_inhibit_evaluation_warnings;
  /* If it's a `(', then we might be looking at the type-id
     construction.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type;
      bool saved_in_type_id_in_expr_p;

      /* We can't be sure yet whether we're looking at a type-id or an
         expression.  */
      cp_parser_parse_tentatively (parser);
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
      parser->in_type_id_in_expr_p = true;
      type = cp_parser_type_id (parser);
      parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
      /* Now, look for the trailing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      /* If all went well, then we're done.  */
      if (cp_parser_parse_definitely (parser))
        {
          cp_decl_specifier_seq decl_specs;

          /* Build a trivial decl-specifier-seq.  */
          clear_decl_specs (&decl_specs);
          decl_specs.type = type;

          /* Call grokdeclarator to figure out what type this is.  */
          expr = grokdeclarator (NULL,
                                 &decl_specs,
                                 TYPENAME,
                                 /*initialized=*/0,
                                 /*attrlist=*/NULL);
        }
    }

  /* If the type-id production did not work out, then we must be
     looking at the unary-expression production.  */
  if (!expr)
    expr = cp_parser_unary_expression (parser, /*address_p=*/false,
                                       /*cast_p=*/false, NULL);

  if (pack_expansion_p)
    /* Build a pack expansion.  */
    expr = make_pack_expansion (expr);

  /* Go back to evaluating expressions.  */
  --cp_unevaluated_operand;
  --c_inhibit_evaluation_warnings;

  /* Free the message we created.  */
  free (tmp);
  /* And restore the old one.  */
  parser->type_definition_forbidden_message = saved_message;
  /* NOTE(review): only integral_constant_expression_p was cleared above;
     the non-integral flag is saved and restored unchanged.  */
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expr;
}

/* If the current declaration has no declarator, return true.  */

static bool
cp_parser_declares_only_class_p (cp_parser *parser)
{
  /* If the next token is a `;' or a `,' then there is no
     declarator.  */
  return (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_COMMA));
}

/* Update the DECL_SPECS to reflect the storage class indicated by
   KEYWORD.  */

static void
cp_parser_set_storage_class (cp_parser *parser,
                             cp_decl_specifier_seq *decl_specs,
                             enum rid keyword,
                             location_t location)
{
  cp_storage_class storage_class;

  /* Storage-class-specifiers are invalid inside `extern "C" ...'
     (unbraced linkage specifications).  */
  if (parser->in_unbraced_linkage_specification_p)
    {
      error_at (location, "invalid use of %qD in linkage specification",
                ridpointers[keyword]);
      return;
    }
  else if (decl_specs->storage_class != sc_none)
    {
      /* Two storage classes: flag the conflict; grokdeclarator
         diagnoses it later.  */
      decl_specs->conflicting_specifiers_p = true;
      return;
    }

  if ((keyword == RID_EXTERN || keyword == RID_STATIC)
      && decl_specs->specs[(int) ds_thread])
    {
      /* `__thread' must follow, not precede, `extern'/`static'.  */
      error_at (location, "%<__thread%> before %qD", ridpointers[keyword]);
      decl_specs->specs[(int) ds_thread] = 0;
    }

  switch (keyword)
    {
    case RID_AUTO:
      storage_class = sc_auto;
      break;
    case RID_REGISTER:
      storage_class = sc_register;
      break;
    case RID_STATIC:
      storage_class = sc_static;
      break;
    case RID_EXTERN:
      storage_class = sc_extern;
      break;
    case RID_MUTABLE:
      storage_class = sc_mutable;
      break;
    default:
      gcc_unreachable ();
    }
  decl_specs->storage_class = storage_class;

  /* A storage class specifier cannot be applied alongside a typedef
     specifier.  If there is a typedef specifier present then set
     conflicting_specifiers_p which will trigger an error later
     on in grokdeclarator.  */
  if (decl_specs->specs[(int)ds_typedef])
    decl_specs->conflicting_specifiers_p = true;
}

/* Update the DECL_SPECS to reflect the TYPE_SPEC.
   If TYPE_DEFINITION_P is true, the type is a class or enum
   definition.  */

static void
cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs,
                              tree type_spec,
                              location_t location,
                              bool type_definition_p)
{
  decl_specs->any_specifiers_p = true;

  /* If the user tries to redeclare bool, char16_t, char32_t, or wchar_t
     (with, for example, in "typedef int wchar_t;") we remember that
     this is what happened.  In system headers, we ignore these
     declarations so that G++ can work with system headers that are not
     C++-safe.  */
  if (decl_specs->specs[(int) ds_typedef]
      && !type_definition_p
      && (type_spec == boolean_type_node
          || type_spec == char16_type_node
          || type_spec == char32_type_node
          || type_spec == wchar_type_node)
      && (decl_specs->type
          || decl_specs->specs[(int) ds_long]
          || decl_specs->specs[(int) ds_short]
          || decl_specs->specs[(int) ds_unsigned]
          || decl_specs->specs[(int) ds_signed]))
    {
      decl_specs->redefined_builtin_type = type_spec;
      /* Only record the redefined type as THE type if no other type
         specifier was seen first.  */
      if (!decl_specs->type)
        {
          decl_specs->type = type_spec;
          decl_specs->type_definition_p = false;
          decl_specs->type_location = location;
        }
    }
  else if (decl_specs->type)
    /* A second type specifier: e.g. "int double x;".  Flag it;
       diagnosed later.  */
    decl_specs->multiple_types_p = true;
  else
    {
      decl_specs->type = type_spec;
      decl_specs->type_definition_p = type_definition_p;
      decl_specs->redefined_builtin_type = NULL_TREE;
      decl_specs->type_location = location;
    }
}

/* DECL_SPECIFIERS is the representation of a decl-specifier-seq.
   Returns TRUE iff `friend' appears among the DECL_SPECIFIERS.  */

static bool
cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers)
{
  return decl_specifiers->specs[(int) ds_friend] != 0;
}

/* Issue an error message indicating that TOKEN_DESC was expected.
   If KEYWORD is true, it indicates this function is called by
   cp_parser_require_keyword and the required token can only be the
   indicated keyword.
*/

static void
cp_parser_required_error (cp_parser *parser,
                          required_token token_desc,
                          bool keyword)
{
  /* Keyword-valued descriptors first: these are legal for both the
     KEYWORD and non-KEYWORD cases, so they return before the
     `if (!keyword)' split below.  */
  switch (token_desc)
    {
      case RT_NEW:
        cp_parser_error (parser, "expected %<new%>");
        return;
      case RT_DELETE:
        cp_parser_error (parser, "expected %<delete%>");
        return;
      case RT_RETURN:
        cp_parser_error (parser, "expected %<return%>");
        return;
      case RT_WHILE:
        cp_parser_error (parser, "expected %<while%>");
        return;
      case RT_EXTERN:
        cp_parser_error (parser, "expected %<extern%>");
        return;
      case RT_STATIC_ASSERT:
        cp_parser_error (parser, "expected %<static_assert%>");
        return;
      case RT_DECLTYPE:
        cp_parser_error (parser, "expected %<decltype%>");
        return;
      case RT_OPERATOR:
        cp_parser_error (parser, "expected %<operator%>");
        return;
      case RT_CLASS:
        cp_parser_error (parser, "expected %<class%>");
        return;
      case RT_TEMPLATE:
        cp_parser_error (parser, "expected %<template%>");
        return;
      case RT_NAMESPACE:
        cp_parser_error (parser, "expected %<namespace%>");
        return;
      case RT_USING:
        cp_parser_error (parser, "expected %<using%>");
        return;
      case RT_ASM:
        cp_parser_error (parser, "expected %<asm%>");
        return;
      case RT_TRY:
        cp_parser_error (parser, "expected %<try%>");
        return;
      case RT_CATCH:
        cp_parser_error (parser, "expected %<catch%>");
        return;
      case RT_THROW:
        cp_parser_error (parser, "expected %<throw%>");
        return;
      case RT_LABEL:
        cp_parser_error (parser, "expected %<__label__%>");
        return;
      case RT_AT_TRY:
        cp_parser_error (parser, "expected %<@try%>");
        return;
      case RT_AT_SYNCHRONIZED:
        cp_parser_error (parser, "expected %<@synchronized%>");
        return;
      case RT_AT_THROW:
        cp_parser_error (parser, "expected %<@throw%>");
        return;
      case RT_TRANSACTION_ATOMIC:
        cp_parser_error (parser, "expected %<__transaction_atomic%>");
        return;
      case RT_TRANSACTION_RELAXED:
        cp_parser_error (parser, "expected %<__transaction_relaxed%>");
        return;
      default:
        break;
    }

  /* Punctuation descriptors are only valid when a specific keyword was
     not demanded.  */
  if (!keyword)
    {
      switch (token_desc)
        {
          case RT_SEMICOLON:
            cp_parser_error (parser, "expected %<;%>");
            return;
          case RT_OPEN_PAREN:
            cp_parser_error (parser, "expected %<(%>");
            return;
          case RT_CLOSE_BRACE:
            cp_parser_error (parser, "expected %<}%>");
            return;
          case RT_OPEN_BRACE:
            cp_parser_error (parser, "expected %<{%>");
            return;
          case RT_CLOSE_SQUARE:
            cp_parser_error (parser, "expected %<]%>");
            return;
          case RT_OPEN_SQUARE:
            cp_parser_error (parser, "expected %<[%>");
            return;
          case RT_COMMA:
            cp_parser_error (parser, "expected %<,%>");
            return;
          case RT_SCOPE:
            cp_parser_error (parser, "expected %<::%>");
            return;
          case RT_LESS:
            cp_parser_error (parser, "expected %<<%>");
            return;
          case RT_GREATER:
            cp_parser_error (parser, "expected %<>%>");
            return;
          case RT_EQ:
            cp_parser_error (parser, "expected %<=%>");
            return;
          case RT_ELLIPSIS:
            cp_parser_error (parser, "expected %<...%>");
            return;
          case RT_MULT:
            cp_parser_error (parser, "expected %<*%>");
            return;
          case RT_COMPL:
            cp_parser_error (parser, "expected %<~%>");
            return;
          case RT_COLON:
            cp_parser_error (parser, "expected %<:%>");
            return;
          case RT_COLON_SCOPE:
            cp_parser_error (parser, "expected %<:%> or %<::%>");
            return;
          case RT_CLOSE_PAREN:
            cp_parser_error (parser, "expected %<)%>");
            return;
          case RT_COMMA_CLOSE_PAREN:
            cp_parser_error (parser, "expected %<,%> or %<)%>");
            return;
          case RT_PRAGMA_EOL:
            cp_parser_error (parser, "expected end of line");
            return;
          case RT_NAME:
            cp_parser_error (parser, "expected identifier");
            return;
          case RT_SELECT:
            cp_parser_error (parser, "expected selection-statement");
            return;
          case RT_INTERATION:
            cp_parser_error (parser, "expected iteration-statement");
            return;
          case RT_JUMP:
            cp_parser_error (parser, "expected jump-statement");
            return;
          case RT_CLASS_KEY:
            cp_parser_error (parser, "expected class-key");
            return;
          case RT_CLASS_TYPENAME_TEMPLATE:
            cp_parser_error (parser,
                 "expected %<class%>, %<typename%>, or %<template%>");
            return;
          default:
            gcc_unreachable ();
        }
    }
  else
    gcc_unreachable ();
}

/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.

   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require (cp_parser* parser,
                   enum cpp_ttype type,
                   required_token token_desc)
{
  if (cp_lexer_next_token_is (parser->lexer, type))
    return cp_lexer_consume_token (parser->lexer);
  else
    {
      /* Output the MESSAGE -- unless we're parsing tentatively.  */
      if (!cp_parser_simulate_error (parser))
        cp_parser_required_error (parser, token_desc, /*keyword=*/false);
      return NULL;
    }
}

/* An error message is produced if the next token is not '>'.
   All further tokens are skipped until the desired token is
   found or '{', '}', ';' or an unbalanced ')' or ']'.  */

static void
cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser)
{
  /* Current level of '< ... >'.  */
  unsigned level = 0;
  /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'.  */
  unsigned nesting_depth = 0;

  /* Are we ready, yet?  If not, issue error message.  */
  if (cp_parser_require (parser, CPP_GREATER, RT_GREATER))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
        {
        case CPP_LESS:
          if (!nesting_depth)
            ++level;
          break;

        case CPP_RSHIFT:
          if (cxx_dialect == cxx98)
            /* C++0x views the `>>' operator as two `>' tokens, but
               C++98 does not.  */
            break;
          else if (!nesting_depth && level-- == 0)
            {
              /* We've hit a `>>' where the first `>' closes the
                 template argument list, and the second `>' is
                 spurious.  Just consume the `>>' and stop; we've
                 already produced at least one error.  */
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          /* Fall through for C++0x, so we handle the second `>' in
             the `>>'.  */

        case CPP_GREATER:
          /* Note the post-decrement: LEVEL is tested against 0 before
             being decremented, so a `>' at the outermost level ends
             the scan.  */
          if (!nesting_depth && level-- == 0)
            {
              /* We've reached the token we want, consume it and stop.  */
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          break;

        case CPP_OPEN_PAREN:
        case CPP_OPEN_SQUARE:
          ++nesting_depth;
          break;

        case CPP_CLOSE_PAREN:
        case CPP_CLOSE_SQUARE:
          /* An unbalanced closer ends the scan without consuming it.  */
          if (nesting_depth-- == 0)
            return;
          break;

        case CPP_EOF:
        case CPP_PRAGMA_EOL:
        case CPP_SEMICOLON:
        case CPP_OPEN_BRACE:
        case CPP_CLOSE_BRACE:
          /* The '>' was probably forgotten, don't look further.  */
          return;

        default:
          break;
        }

      /* Consume this token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* If the next token is the indicated KEYWORD, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.

   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require_keyword (cp_parser* parser,
                           enum rid keyword,
                           required_token token_desc)
{
  cp_token *token = cp_parser_require (parser, CPP_KEYWORD, token_desc);

  /* cp_parser_require only checked the token class (CPP_KEYWORD); we
     must still verify it is the specific keyword requested.  */
  if (token && token->keyword != keyword)
    {
      cp_parser_required_error (parser, token_desc, /*keyword=*/true);
      return NULL;
    }

  return token;
}

/* Returns TRUE iff TOKEN is a token that can begin the body of a
   function-definition.  */

static bool
cp_parser_token_starts_function_definition_p (cp_token* token)
{
  return (/* An ordinary function-body begins with an `{'.  */
          token->type == CPP_OPEN_BRACE
          /* A ctor-initializer begins with a `:'.  */
          || token->type == CPP_COLON
          /* A function-try-block begins with `try'.  */
          || token->keyword == RID_TRY
          /* A function-transaction-block begins with `__transaction_atomic'
             or `__transaction_relaxed'.  */
          || token->keyword == RID_TRANSACTION_ATOMIC
          || token->keyword == RID_TRANSACTION_RELAXED
          /* The named return value extension begins with `return'.  */
          || token->keyword == RID_RETURN);
}

/* Returns TRUE iff the next token is the ":" or "{" beginning a class
   definition.
*/

static bool
cp_parser_next_token_starts_class_definition_p (cp_parser *parser)
{
  cp_token *token;

  token = cp_lexer_peek_token (parser->lexer);
  /* `{' opens the member-specification; `:' begins a base-clause.  */
  return (token->type == CPP_OPEN_BRACE || token->type == CPP_COLON);
}

/* Returns TRUE iff the next token is the "," or ">" (or `>>', in
   C++0x) ending a template-argument.  */

static bool
cp_parser_next_token_ends_template_argument_p (cp_parser *parser)
{
  cp_token *token;

  token = cp_lexer_peek_token (parser->lexer);
  return (token->type == CPP_COMMA
          || token->type == CPP_GREATER
          || token->type == CPP_ELLIPSIS
          /* In C++0x a `>>' may close two nested argument lists.  */
          || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT));
}

/* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the
   (n+1)-th is a ":" (which is a possible digraph typo for "< ::").  */

static bool
cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser,
                                                     size_t n)
{
  cp_token *token;

  token = cp_lexer_peek_nth_token (parser->lexer, n);
  if (token->type == CPP_LESS)
    return true;
  /* Check for the sequence `<::' in the original code. It would be lexed as
     `[:', where `[' is a digraph, and there is no whitespace before
     `:'.  */
  if (token->type == CPP_OPEN_SQUARE && token->flags & DIGRAPH)
    {
      cp_token *token2;
      token2 = cp_lexer_peek_nth_token (parser->lexer, n+1);
      if (token2->type == CPP_COLON && !(token2->flags & PREV_WHITE))
        return true;
    }
  return false;
}

/* Returns the kind of tag indicated by TOKEN, if it is a class-key,
   or none_type otherwise.  */

static enum tag_types
cp_parser_token_is_class_key (cp_token* token)
{
  switch (token->keyword)
    {
    case RID_CLASS:
      return class_type;
    case RID_STRUCT:
      return record_type;
    case RID_UNION:
      return union_type;

    default:
      return none_type;
    }
}

/* Issue an error message if the CLASS_KEY does not match the TYPE.
*/

static void
cp_parser_check_class_key (enum tag_types class_key, tree type)
{
  if (type == error_mark_node)
    return;
  /* Only the union/non-union mismatch is a hard (perm)error;
     struct vs. class disagreement is permitted.  */
  if ((TREE_CODE (type) == UNION_TYPE) != (class_key == union_type))
    {
      permerror (input_location, "%qs tag used in naming %q#T",
                 class_key == union_type ? "union"
                 : class_key == record_type ? "struct" : "class",
                 type);
      inform (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
              "%q#T was previously declared here", type);
    }
}

/* Issue an error message if DECL is redeclared with different
   access than its original declaration [class.access.spec/3].
   This applies to nested classes and nested class templates.
   [class.mem/1].  */

static void
cp_parser_check_access_in_redeclaration (tree decl, location_t location)
{
  if (!decl || !CLASS_TYPE_P (TREE_TYPE (decl)))
    return;

  if ((TREE_PRIVATE (decl)
       != (current_access_specifier == access_private_node))
      || (TREE_PROTECTED (decl)
          != (current_access_specifier == access_protected_node)))
    error_at (location, "%qD redeclared with different access", decl);
}

/* Look for the `template' keyword, as a syntactic disambiguator.
   Return TRUE iff it is present, in which case it will be
   consumed.  */

static bool
cp_parser_optional_template_keyword (cp_parser *parser)
{
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* The `template' keyword can only be used within templates;
         outside templates the parser can always figure out what is a
         template and what is not.  */
      if (!processing_template_decl)
        {
          cp_token *token = cp_lexer_peek_token (parser->lexer);
          error_at (token->location,
                    "%<template%> (as a disambiguator) is only allowed "
                    "within templates");
          /* If this part of the token stream is rescanned, the same
             error message would be generated.  So, we purge the token
             from the stream.  */
          cp_lexer_purge_token (parser->lexer);
          return false;
        }
      else
        {
          /* Consume the `template' keyword.  */
          cp_lexer_consume_token (parser->lexer);
          return true;
        }
    }

  return false;
}

/* The next token is a CPP_NESTED_NAME_SPECIFIER.
   Consume the token, set PARSER->SCOPE, and perform other related
   actions.  */

static void
cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser)
{
  int i;
  struct tree_check *check_value;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *checks;

  /* Get the stored value.  */
  check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
  /* Perform any access checks that were deferred.  */
  checks = check_value->checks;
  if (checks)
    {
      FOR_EACH_VEC_ELT (deferred_access_check, checks, i, chk)
        perform_or_defer_access_check (chk->binfo,
                                       chk->decl,
                                       chk->diag_decl);
    }
  /* Set the scope from the stored value.  */
  parser->scope = check_value->value;
  parser->qualifying_scope = check_value->qualifying_scope;
  parser->object_scope = NULL_TREE;
}

/* Consume tokens up through a non-nested END token.  Returns TRUE if we
   encounter the end of a block before what we were looking for.  */

static bool
cp_parser_cache_group (cp_parser *parser,
                       enum cpp_ttype end,
                       unsigned depth)
{
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* Abort a parenthesized expression if we encounter a semicolon.  */
      if ((end == CPP_CLOSE_PAREN || depth == 0)
          && token->type == CPP_SEMICOLON)
        return true;
      /* If we've reached the end of the file, stop.  */
      if (token->type == CPP_EOF
          || (end != CPP_PRAGMA_EOL
              && token->type == CPP_PRAGMA_EOL))
        return true;
      if (token->type == CPP_CLOSE_BRACE && depth == 0)
        /* We've hit the end of an enclosing block, so there's been some
           kind of syntax error.  */
        return true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
      /* See if it starts a new group.  */
      if (token->type == CPP_OPEN_BRACE)
        {
          /* Recurse to swallow the matched group.  */
          cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1);
          /* In theory this should probably check end == '}', but
             cp_parser_save_member_function_body needs it to exit
             after either '}' or ')' when called with ')'.  */
          if (depth == 0)
            return false;
        }
      else if (token->type == CPP_OPEN_PAREN)
        {
          cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1);
          if (depth == 0 && end == CPP_CLOSE_PAREN)
            return false;
        }
      else if (token->type == CPP_PRAGMA)
        cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1);
      else if (token->type == end)
        return false;
    }
}

/* Like above, for caching a default argument or NSDMI.  Both of these are
   terminated by a non-nested comma, but it can be unclear whether or not a
   comma is nested in a template argument list unless we do more parsing.
   In order to handle this ambiguity, when we encounter a ',' after a '<'
   we try to parse what follows as a parameter-declaration-list (in the
   case of a default argument) or a member-declarator (in the case of an
   NSDMI).  If that succeeds, then we stop caching.  */

static tree
cp_parser_cache_defarg (cp_parser *parser, bool nsdmi)
{
  unsigned depth = 0;
  int maybe_template_id = 0;
  cp_token *first_token;
  cp_token *token;
  tree default_argument;

  /* Add tokens until we have processed the entire default
     argument.  We add the range [first_token, token).  */
  first_token = cp_lexer_peek_token (parser->lexer);
  if (first_token->type == CPP_OPEN_BRACE)
    {
      /* For list-initialization, this is straightforward.  */
      cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
      token = cp_lexer_peek_token (parser->lexer);
    }
  else while (true)
    {
      bool done = false;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* What we do depends on what token we have.  */
      switch (token->type)
        {
          /* In valid code, a default argument must be
             immediately followed by a `,' `)', or `...'.  */
        case CPP_COMMA:
          if (depth == 0 && maybe_template_id)
            {
              /* If we've seen a '<', we might be in a
                 template-argument-list.  Until Core issue 325 is
                 resolved, we don't know how this situation ought
                 to be handled, so try to DTRT.  We check whether
                 what comes after the comma is a valid parameter
                 declaration list.  If it is, then the comma ends
                 the default argument; otherwise the default
                 argument continues.  */
              bool error = false;
              tree t;

              /* Set ITALP so cp_parser_parameter_declaration_list
                 doesn't decide to commit to this parse.  */
              bool saved_italp = parser->in_template_argument_list_p;
              parser->in_template_argument_list_p = true;

              cp_parser_parse_tentatively (parser);
              cp_lexer_consume_token (parser->lexer);

              if (nsdmi)
                {
                  int ctor_dtor_or_conv_p;
                  cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                                        &ctor_dtor_or_conv_p,
                                        /*parenthesized_p=*/NULL,
                                        /*member_p=*/true);
                }
              else
                {
                  /* Probe with a throw-away parameter scope; pop any
                     bindings the tentative parse created.  */
                  begin_scope (sk_function_parms, NULL_TREE);
                  cp_parser_parameter_declaration_list (parser, &error);
                  for (t = current_binding_level->names; t; t = DECL_CHAIN (t))
                    pop_binding (DECL_NAME (t), t);
                  leave_scope ();
                }
              if (!cp_parser_error_occurred (parser) && !error)
                done = true;
              cp_parser_abort_tentative_parse (parser);

              parser->in_template_argument_list_p = saved_italp;
              break;
            }
          /* Otherwise fall through: an outermost `,' outside a
             template-id ends the default argument.  */
        case CPP_CLOSE_PAREN:
        case CPP_ELLIPSIS:
          /* If we run into a non-nested `;', `}', or `]',
             then the code is invalid -- but the default
             argument is certainly over.  */
        case CPP_SEMICOLON:
        case CPP_CLOSE_BRACE:
        case CPP_CLOSE_SQUARE:
          if (depth == 0)
            done = true;
          /* Update DEPTH, if necessary.  */
          else if (token->type == CPP_CLOSE_PAREN
                   || token->type == CPP_CLOSE_BRACE
                   || token->type == CPP_CLOSE_SQUARE)
            --depth;
          break;

        case CPP_OPEN_PAREN:
        case CPP_OPEN_SQUARE:
        case CPP_OPEN_BRACE:
          ++depth;
          break;

        case CPP_LESS:
          if (depth == 0)
            /* This might be the comparison operator, or it might
               start a template argument list.  */
            ++maybe_template_id;
          break;

        case CPP_RSHIFT:
          if (cxx_dialect == cxx98)
            break;
          /* Fall through for C++0x, which treats the `>>'
             operator like two `>' tokens in certain cases.  */

        case CPP_GREATER:
          if (depth == 0)
            {
              /* This might be an operator, or it might close a
                 template argument list.  But if a previous '<'
                 started a template argument list, this will have
                 closed it, so we can't be in one anymore.  */
              maybe_template_id -= 1 + (token->type == CPP_RSHIFT);
              if (maybe_template_id < 0)
                maybe_template_id = 0;
            }
          break;

          /* If we run out of tokens, issue an error message.  */
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          error_at (token->location, "file ends in default argument");
          done = true;
          break;

        case CPP_NAME:
        case CPP_SCOPE:
          /* In these cases, we should look for template-ids.
             For example, if the default argument is
             `X<int, double>()', we need to do name lookup to
             figure out whether or not `X' is a template; if
             so, the `,' does not end the default argument.

             That is not yet done.  */
          break;

        default:
          break;
        }

      /* If we've reached the end, stop.  */
      if (done)
        break;

      /* Add the token to the token block.  */
      token = cp_lexer_consume_token (parser->lexer);
    }

  /* Create a DEFAULT_ARG to represent the unparsed default
     argument.  */
  default_argument = make_node (DEFAULT_ARG);
  DEFARG_TOKENS (default_argument)
    = cp_token_cache_new (first_token, token);
  DEFARG_INSTANTIATIONS (default_argument) = NULL;

  return default_argument;
}

/* Begin parsing tentatively.  We always save tokens while parsing
   tentatively so that if the tentative parsing fails we can restore the
   tokens.  */

static void
cp_parser_parse_tentatively (cp_parser* parser)
{
  /* Enter a new parsing context.  */
  parser->context = cp_parser_context_new (parser->context);
  /* Begin saving tokens.  */
  cp_lexer_save_tokens (parser->lexer);
  /* In order to avoid repetitive access control error messages,
     access checks are queued up until we are no longer parsing
     tentatively.  */
  push_deferring_access_checks (dk_deferred);
}

/* Commit to the currently active tentative parse.  */

static void
cp_parser_commit_to_tentative_parse (cp_parser* parser)
{
  cp_parser_context *context;
  cp_lexer *lexer;

  /* Mark all of the levels as committed.
*/
  lexer = parser->lexer;
  for (context = parser->context; context->next; context = context->next)
    {
      /* Stop at the first already-committed level; everything beneath
         it was committed earlier.  */
      if (context->status == CP_PARSER_STATUS_KIND_COMMITTED)
        break;
      context->status = CP_PARSER_STATUS_KIND_COMMITTED;
      /* Find the innermost lexer that is still saving tokens for this
         level and commit them.  */
      while (!cp_lexer_saving_tokens (lexer))
        lexer = lexer->next;
      cp_lexer_commit_tokens (lexer);
    }
}

/* Abort the currently active tentative parse.  All consumed tokens
   will be rolled back, and no diagnostics will be issued.  */

static void
cp_parser_abort_tentative_parse (cp_parser* parser)
{
  gcc_assert (parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED
              || errorcount > 0);
  /* Force the error flag so parse_definitely takes the rollback path.  */
  cp_parser_simulate_error (parser);
  /* Now, pretend that we want to see if the construct was
     successfully parsed.  */
  cp_parser_parse_definitely (parser);
}

/* Stop parsing tentatively.  If a parse error has occurred, restore the
   token stream.  Otherwise, commit to the tokens we have consumed.
   Returns true if no error occurred; false otherwise.  */

static bool
cp_parser_parse_definitely (cp_parser* parser)
{
  bool error_occurred;
  cp_parser_context *context;

  /* Remember whether or not an error occurred, since we are about to
     destroy that information.  */
  error_occurred = cp_parser_error_occurred (parser);
  /* Remove the topmost context from the stack.  */
  context = parser->context;
  parser->context = context->next;
  /* If no parse errors occurred, commit to the tentative parse.  */
  if (!error_occurred)
    {
      /* Commit to the tokens read tentatively, unless that was
         already done.  */
      if (context->status != CP_PARSER_STATUS_KIND_COMMITTED)
        cp_lexer_commit_tokens (parser->lexer);

      pop_to_parent_deferring_access_checks ();
    }
  /* Otherwise, if errors occurred, roll back our state so that things
     are just as they were before we began the tentative parse.  */
  else
    {
      cp_lexer_rollback_tokens (parser->lexer);
      pop_deferring_access_checks ();
    }
  /* Add the context to the front of the free list so the allocation
     can be reused by the next tentative parse.  */
  context->next = cp_parser_context_free_list;
  cp_parser_context_free_list = context;

  return !error_occurred;
}

/* Returns true if we are parsing tentatively and are not committed to
   this tentative parse.  */

static bool
cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser)
{
  return (cp_parser_parsing_tentatively (parser)
          && parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED);
}

/* Returns nonzero iff an error has occurred during the most recent
   tentative parse.  */

static bool
cp_parser_error_occurred (cp_parser* parser)
{
  return (cp_parser_parsing_tentatively (parser)
          && parser->context->status == CP_PARSER_STATUS_KIND_ERROR);
}

/* Returns nonzero if GNU extensions are allowed.  */

static bool
cp_parser_allow_gnu_extensions_p (cp_parser* parser)
{
  return parser->allow_gnu_extensions_p;
}

/* Objective-C++ Productions */


/* Parse an Objective-C expression, which feeds into a
   primary-expression above.

   objc-expression:
     objc-message-expression
     objc-string-literal
     objc-encode-expression
     objc-protocol-expression
     objc-selector-expression

  Returns a tree representation of the expression.  */

static tree
cp_parser_objc_expression (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->type)
    {
    case CPP_OPEN_SQUARE:
      return cp_parser_objc_message_expression (parser);

    case CPP_OBJC_STRING:
      kwd = cp_lexer_consume_token (parser->lexer);
      return objc_build_string_object (kwd->u.value);

    case CPP_KEYWORD:
      switch (kwd->keyword)
        {
        case RID_AT_ENCODE:
          return cp_parser_objc_encode_expression (parser);

        case RID_AT_PROTOCOL:
          return cp_parser_objc_protocol_expression (parser);

        case RID_AT_SELECTOR:
          return cp_parser_objc_selector_expression (parser);

        default:
          break;
        }
    default:
      error_at (kwd->location,
                "misplaced %<@%D%> Objective-C++ construct",
                kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}

/* Parse an Objective-C message expression.
   objc-message-expression:
     [ objc-message-receiver objc-message-args ]

   Returns a representation of an Objective-C message.  */

static tree
cp_parser_objc_message_expression (cp_parser* parser)
{
  tree receiver, messageargs;

  cp_lexer_consume_token (parser->lexer);  /* Eat '['.  */
  receiver = cp_parser_objc_message_receiver (parser);
  messageargs = cp_parser_objc_message_args (parser);
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  return objc_build_message_expr (receiver, messageargs);
}

/* Parse an objc-message-receiver.

   objc-message-receiver:
     expression
     simple-type-specifier

  Returns a representation of the type or expression.  */

static tree
cp_parser_objc_message_receiver (cp_parser* parser)
{
  tree rcv;

  /* An Objective-C message receiver may be either (1) a type
     or (2) an expression.  Try (2) first, tentatively; if that
     fails, reparse as (1) and build a class reference from it.  */
  cp_parser_parse_tentatively (parser);
  rcv = cp_parser_expression (parser, false, NULL);

  if (cp_parser_parse_definitely (parser))
    return rcv;

  rcv = cp_parser_simple_type_specifier (parser,
                                         /*decl_specs=*/NULL,
                                         CP_PARSER_FLAGS_NONE);

  return objc_get_class_reference (rcv);
}

/* Parse the arguments and selectors comprising an Objective-C message.

   objc-message-args:
     objc-selector
     objc-selector-args
     objc-selector-args , objc-comma-args

   objc-selector-args:
     objc-selector [opt] : assignment-expression
     objc-selector-args objc-selector [opt] : assignment-expression

   objc-comma-args:
     assignment-expression
     objc-comma-args , assignment-expression

   Returns a TREE_LIST, with TREE_PURPOSE containing a list of selector
   arguments and TREE_VALUE containing a list of comma arguments.
*/

static tree
cp_parser_objc_message_args (cp_parser* parser)
{
  tree sel_args = NULL_TREE, addl_args = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* First, the selector/argument pairs.  A bare ':' stands for an
     unnamed selector component.  */
  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, arg;

      if (token->type != CPP_COLON)
        selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
          && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
        return build_tree_list (selector, NULL_TREE);

      maybe_unary_selector_p = false;
      cp_parser_require (parser, CPP_COLON, RT_COLON);
      arg = cp_parser_assignment_expression (parser, false, NULL);

      sel_args = chainon (sel_args,
                          build_tree_list (selector, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* Handle non-selector arguments, if any. */
  while (token->type == CPP_COMMA)
    {
      tree arg;

      cp_lexer_consume_token (parser->lexer);
      arg = cp_parser_assignment_expression (parser, false, NULL);

      addl_args = chainon (addl_args,
                           build_tree_list (NULL_TREE, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (sel_args == NULL_TREE && addl_args == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ message argument(s) are expected");
      return build_tree_list (error_mark_node, error_mark_node);
    }

  return build_tree_list (sel_args, addl_args);
}

/* Parse an Objective-C encode expression.

   objc-encode-expression:
     @encode objc-typename

   Returns an encoded representation of the type argument.  */

static tree
cp_parser_objc_encode_expression (cp_parser* parser)
{
  tree type;
  cp_token *token;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@encode'.
*/
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  token = cp_lexer_peek_token (parser->lexer);
  type = complete_type (cp_parser_type_id (parser));
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  if (!type)
    {
      error_at (token->location,
                "%<@encode%> must specify a type as an argument");
      return error_mark_node;
    }

  /* This happens if we find @encode(T) (where T is a template
     typename or something dependent on a template typename) when
     parsing a template.  In that case, we can't compile it
     immediately, but we rather create an AT_ENCODE_EXPR which will
     need to be instantiated when the template is used.  */
  if (dependent_type_p (type))
    {
      tree value = build_min (AT_ENCODE_EXPR, size_type_node, type);
      TREE_READONLY (value) = 1;
      return value;
    }

  return objc_build_encode_expr (type);
}

/* Parse an Objective-C @defs expression:  @defs ( identifier ).
   Returns the instance variables of the named class.  */

static tree
cp_parser_objc_defs_expression (cp_parser *parser)
{
  tree name;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@defs'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_get_class_ivars (name);
}

/* Parse an Objective-C protocol expression.

  objc-protocol-expression:
    @protocol ( identifier )

  Returns a representation of the protocol expression.  */

static tree
cp_parser_objc_protocol_expression (cp_parser* parser)
{
  tree proto;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  proto = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_protocol_expr (proto);
}

/* Parse an Objective-C selector expression.

   objc-selector-expression:
     @selector ( objc-method-signature )

   objc-method-signature:
     objc-selector
     objc-selector-seq

   objc-selector-seq:
     objc-selector :
     objc-selector-seq objc-selector :

  Returns a representation of the method selector.
*/

static tree
cp_parser_objc_selector_expression (cp_parser* parser)
{
  tree sel_seq = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@selector'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON
         || token->type == CPP_SCOPE)
    {
      tree selector = NULL_TREE;

      if (token->type != CPP_COLON || token->type == CPP_SCOPE)
        selector = cp_parser_objc_selector (parser);

      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE))
        {
          /* Detect if we have a unary selector.  */
          if (maybe_unary_selector_p)
            {
              sel_seq = selector;
              goto finish_selector;
            }
          else
            {
              cp_parser_error (parser, "expected %<:%>");
            }
        }
      maybe_unary_selector_p = false;
      token = cp_lexer_consume_token (parser->lexer);

      if (token->type == CPP_SCOPE)
        {
          /* A '::' token counts as two ':'s, so chain two entries.  */
          sel_seq
            = chainon (sel_seq,
                       build_tree_list (selector, NULL_TREE));
          sel_seq
            = chainon (sel_seq,
                       build_tree_list (NULL_TREE, NULL_TREE));
        }
      else
        sel_seq
          = chainon (sel_seq,
                     build_tree_list (selector, NULL_TREE));

      token = cp_lexer_peek_token (parser->lexer);
    }

 finish_selector:
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_selector_expr (loc, sel_seq);
}

/* Parse a list of identifiers.

   objc-identifier-list:
     identifier
     objc-identifier-list , identifier

   Returns a TREE_LIST of identifier nodes.  */

static tree
cp_parser_objc_identifier_list (cp_parser* parser)
{
  tree identifier;
  tree list;
  cp_token *sep;

  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  list = build_tree_list (NULL_TREE, identifier);
  sep = cp_lexer_peek_token (parser->lexer);

  while (sep->type == CPP_COMMA)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat ','.
*/
      identifier = cp_parser_identifier (parser);
      /* On a bad identifier, return the list built so far rather
         than error_mark_node, so callers can recover.  */
      if (identifier == error_mark_node)
        return list;

      list = chainon (list, build_tree_list (NULL_TREE, identifier));
      sep = cp_lexer_peek_token (parser->lexer);
    }

  return list;
}

/* Parse an Objective-C alias declaration.

   objc-alias-declaration:
     @compatibility_alias identifier identifier ;

   This function registers the alias mapping with the Objective-C front end.
   It returns nothing.  */

static void
cp_parser_objc_alias_declaration (cp_parser* parser)
{
  tree alias, orig;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@compatibility_alias'.  */
  alias = cp_parser_identifier (parser);
  orig = cp_parser_identifier (parser);
  objc_declare_alias (alias, orig);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse an Objective-C class forward-declaration.

   objc-class-declaration:
     @class objc-identifier-list ;

   The function registers the forward declarations with the Objective-C
   front end.  It returns nothing.  */

static void
cp_parser_objc_class_declaration (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer);  /* Eat '@class'.  */
  while (true)
    {
      tree id;

      id = cp_parser_identifier (parser);
      if (id == error_mark_node)
        break;

      objc_declare_class (id);

      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        cp_lexer_consume_token (parser->lexer);
      else
        break;
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse a list of Objective-C protocol references.

   objc-protocol-refs-opt:
     objc-protocol-refs [opt]

   objc-protocol-refs:
     < objc-identifier-list >

   Returns a TREE_LIST of identifiers, if any.  */

static tree
cp_parser_objc_protocol_refs_opt (cp_parser* parser)
{
  tree protorefs = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat '<'.  */
      protorefs = cp_parser_objc_identifier_list (parser);
      cp_parser_require (parser, CPP_GREATER, RT_GREATER);
    }

  return protorefs;
}

/* Parse an Objective-C visibility specification.
*/

static void
cp_parser_objc_visibility_spec (cp_parser* parser)
{
  cp_token *vis = cp_lexer_peek_token (parser->lexer);

  switch (vis->keyword)
    {
    case RID_AT_PRIVATE:
      objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
      break;
    case RID_AT_PROTECTED:
      objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
      break;
    case RID_AT_PUBLIC:
      objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
      break;
    case RID_AT_PACKAGE:
      objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
      break;
    default:
      /* Not a visibility keyword: consume nothing and leave the
         current visibility unchanged.  */
      return;
    }

  /* Eat '@private'/'@protected'/'@public'.  */
  cp_lexer_consume_token (parser->lexer);
}

/* Parse an Objective-C method type.  Return 'true' if it is a class
   (+) method, and 'false' if it is an instance (-) method.  */

static inline bool
cp_parser_objc_method_type (cp_parser* parser)
{
  if (cp_lexer_consume_token (parser->lexer)->type == CPP_PLUS)
    return true;
  else
    return false;
}

/* Parse a sequence of Objective-C protocol qualifiers (in, out,
   inout, bycopy, byref, oneway).  Returns them as a TREE_LIST.  */

static tree
cp_parser_objc_protocol_qualifiers (cp_parser* parser)
{
  tree quals = NULL_TREE, node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  node = token->u.value;

  while (node && TREE_CODE (node) == IDENTIFIER_NODE
         && (node == ridpointers [(int) RID_IN]
             || node == ridpointers [(int) RID_OUT]
             || node == ridpointers [(int) RID_INOUT]
             || node == ridpointers [(int) RID_BYCOPY]
             || node == ridpointers [(int) RID_BYREF]
             || node == ridpointers [(int) RID_ONEWAY]))
    {
      quals = tree_cons (NULL_TREE, node, quals);
      cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      node = token->u.value;
    }

  return quals;
}

/* Parse an Objective-C typename.  */

static tree
cp_parser_objc_typename (cp_parser* parser)
{
  tree type_name = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree proto_quals, cp_type = NULL_TREE;

      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      proto_quals = cp_parser_objc_protocol_qualifiers (parser);

      /* An ObjC type name may consist of just protocol qualifiers,
         in which case the type shall default to 'id'.
*/ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) { cp_type = cp_parser_type_id (parser); /* If the type could not be parsed, an error has already been produced. For error recovery, behave as if it had not been specified, which will use the default type 'id'. */ if (cp_type == error_mark_node) { cp_type = NULL_TREE; /* We need to skip to the closing parenthesis as cp_parser_type_id() does not seem to do it for us. */ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/false); } } cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); type_name = build_tree_list (proto_quals, cp_type); } return type_name; } /* Check to see if TYPE refers to an Objective-C selector name. */ static bool cp_parser_objc_selector_p (enum cpp_ttype type) { return (type == CPP_NAME || type == CPP_KEYWORD || type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND || type == CPP_OR || type == CPP_COMPL || type == CPP_NOT || type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ || type == CPP_XOR || type == CPP_XOR_EQ); } /* Parse an Objective-C selector. */ static tree cp_parser_objc_selector (cp_parser* parser) { cp_token *token = cp_lexer_consume_token (parser->lexer); if (!cp_parser_objc_selector_p (token->type)) { error_at (token->location, "invalid Objective-C++ selector name"); return error_mark_node; } /* C++ operator names are allowed to appear in ObjC selectors. 
*/ switch (token->type) { case CPP_AND_AND: return get_identifier ("and"); case CPP_AND_EQ: return get_identifier ("and_eq"); case CPP_AND: return get_identifier ("bitand"); case CPP_OR: return get_identifier ("bitor"); case CPP_COMPL: return get_identifier ("compl"); case CPP_NOT: return get_identifier ("not"); case CPP_NOT_EQ: return get_identifier ("not_eq"); case CPP_OR_OR: return get_identifier ("or"); case CPP_OR_EQ: return get_identifier ("or_eq"); case CPP_XOR: return get_identifier ("xor"); case CPP_XOR_EQ: return get_identifier ("xor_eq"); default: return token->u.value; } } /* Parse an Objective-C params list. */ static tree cp_parser_objc_method_keyword_params (cp_parser* parser, tree* attributes) { tree params = NULL_TREE; bool maybe_unary_selector_p = true; cp_token *token = cp_lexer_peek_token (parser->lexer); while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON) { tree selector = NULL_TREE, type_name, identifier; tree parm_attr = NULL_TREE; if (token->keyword == RID_ATTRIBUTE) break; if (token->type != CPP_COLON) selector = cp_parser_objc_selector (parser); /* Detect if we have a unary selector. */ if (maybe_unary_selector_p && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)) { params = selector; /* Might be followed by attributes. */ break; } maybe_unary_selector_p = false; if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) { /* Something went quite wrong. There should be a colon here, but there is not. Stop parsing parameters. */ break; } type_name = cp_parser_objc_typename (parser); /* New ObjC allows attributes on parameters too. 
*/ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE)) parm_attr = cp_parser_attributes_opt (parser); identifier = cp_parser_identifier (parser); params = chainon (params, objc_build_keyword_decl (selector, type_name, identifier, parm_attr)); token = cp_lexer_peek_token (parser->lexer); } if (params == NULL_TREE) { cp_parser_error (parser, "objective-c++ method declaration is expected"); return error_mark_node; } /* We allow tail attributes for the method. */ if (token->keyword == RID_ATTRIBUTE) { *attributes = cp_parser_attributes_opt (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) return params; cp_parser_error (parser, "method attributes must be specified at the end"); return error_mark_node; } if (params == NULL_TREE) { cp_parser_error (parser, "objective-c++ method declaration is expected"); return error_mark_node; } return params; } /* Parse the non-keyword Objective-C params. */ static tree cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp, tree* attributes) { tree params = make_node (TREE_LIST); cp_token *token = cp_lexer_peek_token (parser->lexer); *ellipsisp = false; /* Initially, assume no ellipsis. */ while (token->type == CPP_COMMA) { cp_parameter_declarator *parmdecl; tree parm; cp_lexer_consume_token (parser->lexer); /* Eat ','. */ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_ELLIPSIS) { cp_lexer_consume_token (parser->lexer); /* Eat '...'. */ *ellipsisp = true; token = cp_lexer_peek_token (parser->lexer); break; } /* TODO: parse attributes for tail parameters. */ parmdecl = cp_parser_parameter_declaration (parser, false, NULL); parm = grokdeclarator (parmdecl->declarator, &parmdecl->decl_specifiers, PARM, /*initialized=*/0, /*attrlist=*/NULL); chainon (params, build_tree_list (NULL_TREE, parm)); token = cp_lexer_peek_token (parser->lexer); } /* We allow tail attributes for the method. 
*/ if (token->keyword == RID_ATTRIBUTE) { if (*attributes == NULL_TREE) { *attributes = cp_parser_attributes_opt (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) return params; } else /* We have an error, but parse the attributes, so that we can carry on. */ *attributes = cp_parser_attributes_opt (parser); cp_parser_error (parser, "method attributes must be specified at the end"); return error_mark_node; } return params; } /* Parse a linkage specification, a pragma, an extra semicolon or a block. */ static void cp_parser_objc_interstitial_code (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); /* If the next token is `extern' and the following token is a string literal, then we have a linkage specification. */ if (token->keyword == RID_EXTERN && cp_parser_is_pure_string_literal (cp_lexer_peek_nth_token (parser->lexer, 2))) cp_parser_linkage_specification (parser); /* Handle #pragma, if any. */ else if (token->type == CPP_PRAGMA) cp_parser_pragma (parser, pragma_external); /* Allow stray semicolons. */ else if (token->type == CPP_SEMICOLON) cp_lexer_consume_token (parser->lexer); /* Mark methods as optional or required, when building protocols. */ else if (token->keyword == RID_AT_OPTIONAL) { cp_lexer_consume_token (parser->lexer); objc_set_method_opt (true); } else if (token->keyword == RID_AT_REQUIRED) { cp_lexer_consume_token (parser->lexer); objc_set_method_opt (false); } else if (token->keyword == RID_NAMESPACE) cp_parser_namespace_definition (parser); /* Other stray characters must generate errors. */ else if (token->type == CPP_OPEN_BRACE || token->type == CPP_CLOSE_BRACE) { cp_lexer_consume_token (parser->lexer); error ("stray %qs between Objective-C++ methods", token->type == CPP_OPEN_BRACE ? "{" : "}"); } /* Finally, try to parse a block-declaration, or a function-definition. 
*/ else cp_parser_block_declaration (parser, /*statement_p=*/false); } /* Parse a method signature. */ static tree cp_parser_objc_method_signature (cp_parser* parser, tree* attributes) { tree rettype, kwdparms, optparms; bool ellipsis = false; bool is_class_method; is_class_method = cp_parser_objc_method_type (parser); rettype = cp_parser_objc_typename (parser); *attributes = NULL_TREE; kwdparms = cp_parser_objc_method_keyword_params (parser, attributes); if (kwdparms == error_mark_node) return error_mark_node; optparms = cp_parser_objc_method_tail_params_opt (parser, &ellipsis, attributes); if (optparms == error_mark_node) return error_mark_node; return objc_build_method_signature (is_class_method, rettype, kwdparms, optparms, ellipsis); } static bool cp_parser_objc_method_maybe_bad_prefix_attributes (cp_parser* parser) { tree tattr; cp_lexer_save_tokens (parser->lexer); tattr = cp_parser_attributes_opt (parser); gcc_assert (tattr) ; /* If the attributes are followed by a method introducer, this is not allowed. Dump the attributes and flag the situation. */ if (cp_lexer_next_token_is (parser->lexer, CPP_PLUS) || cp_lexer_next_token_is (parser->lexer, CPP_MINUS)) return true; /* Otherwise, the attributes introduce some interstitial code, possibly so rewind to allow that check. */ cp_lexer_rollback_tokens (parser->lexer); return false; } /* Parse an Objective-C method prototype list. 
*/ static void cp_parser_objc_method_prototype_list (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); while (token->keyword != RID_AT_END && token->type != CPP_EOF) { if (token->type == CPP_PLUS || token->type == CPP_MINUS) { tree attributes, sig; bool is_class_method; if (token->type == CPP_PLUS) is_class_method = true; else is_class_method = false; sig = cp_parser_objc_method_signature (parser, &attributes); if (sig == error_mark_node) { cp_parser_skip_to_end_of_block_or_statement (parser); token = cp_lexer_peek_token (parser->lexer); continue; } objc_add_method_declaration (is_class_method, sig, attributes); cp_parser_consume_semicolon_at_end_of_statement (parser); } else if (token->keyword == RID_AT_PROPERTY) cp_parser_objc_at_property_declaration (parser); else if (token->keyword == RID_ATTRIBUTE && cp_parser_objc_method_maybe_bad_prefix_attributes(parser)) warning_at (cp_lexer_peek_token (parser->lexer)->location, OPT_Wattributes, "prefix attributes are ignored for methods"); else /* Allow for interspersed non-ObjC++ code. */ cp_parser_objc_interstitial_code (parser); token = cp_lexer_peek_token (parser->lexer); } if (token->type != CPP_EOF) cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */ else cp_parser_error (parser, "expected %<@end%>"); objc_finish_interface (); } /* Parse an Objective-C method definition list. 
*/ static void cp_parser_objc_method_definition_list (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); while (token->keyword != RID_AT_END && token->type != CPP_EOF) { tree meth; if (token->type == CPP_PLUS || token->type == CPP_MINUS) { cp_token *ptk; tree sig, attribute; bool is_class_method; if (token->type == CPP_PLUS) is_class_method = true; else is_class_method = false; push_deferring_access_checks (dk_deferred); sig = cp_parser_objc_method_signature (parser, &attribute); if (sig == error_mark_node) { cp_parser_skip_to_end_of_block_or_statement (parser); token = cp_lexer_peek_token (parser->lexer); continue; } objc_start_method_definition (is_class_method, sig, attribute, NULL_TREE); /* For historical reasons, we accept an optional semicolon. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); ptk = cp_lexer_peek_token (parser->lexer); if (!(ptk->type == CPP_PLUS || ptk->type == CPP_MINUS || ptk->type == CPP_EOF || ptk->keyword == RID_AT_END)) { perform_deferred_access_checks (); stop_deferring_access_checks (); meth = cp_parser_function_definition_after_declarator (parser, false); pop_deferring_access_checks (); objc_finish_method_definition (meth); } } /* The following case will be removed once @synthesize is completely implemented. */ else if (token->keyword == RID_AT_PROPERTY) cp_parser_objc_at_property_declaration (parser); else if (token->keyword == RID_AT_SYNTHESIZE) cp_parser_objc_at_synthesize_declaration (parser); else if (token->keyword == RID_AT_DYNAMIC) cp_parser_objc_at_dynamic_declaration (parser); else if (token->keyword == RID_ATTRIBUTE && cp_parser_objc_method_maybe_bad_prefix_attributes(parser)) warning_at (token->location, OPT_Wattributes, "prefix attributes are ignored for methods"); else /* Allow for interspersed non-ObjC++ code. 
*/ cp_parser_objc_interstitial_code (parser); token = cp_lexer_peek_token (parser->lexer); } if (token->type != CPP_EOF) cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */ else cp_parser_error (parser, "expected %<@end%>"); objc_finish_implementation (); } /* Parse Objective-C ivars. */ static void cp_parser_objc_class_ivars (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); if (token->type != CPP_OPEN_BRACE) return; /* No ivars specified. */ cp_lexer_consume_token (parser->lexer); /* Eat '{'. */ token = cp_lexer_peek_token (parser->lexer); while (token->type != CPP_CLOSE_BRACE && token->keyword != RID_AT_END && token->type != CPP_EOF) { cp_decl_specifier_seq declspecs; int decl_class_or_enum_p; tree prefix_attributes; cp_parser_objc_visibility_spec (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)) break; cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_OPTIONAL, &declspecs, &decl_class_or_enum_p); /* auto, register, static, extern, mutable. */ if (declspecs.storage_class != sc_none) { cp_parser_error (parser, "invalid type for instance variable"); declspecs.storage_class = sc_none; } /* __thread. */ if (declspecs.specs[(int) ds_thread]) { cp_parser_error (parser, "invalid type for instance variable"); declspecs.specs[(int) ds_thread] = 0; } /* typedef. */ if (declspecs.specs[(int) ds_typedef]) { cp_parser_error (parser, "invalid type for instance variable"); declspecs.specs[(int) ds_typedef] = 0; } prefix_attributes = declspecs.attributes; declspecs.attributes = NULL_TREE; /* Keep going until we hit the `;' at the end of the declaration. */ while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { tree width = NULL_TREE, attributes, first_attribute, decl; cp_declarator *declarator = NULL; int ctor_dtor_or_conv_p; /* Check for a (possibly unnamed) bitfield declaration. 
*/ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_COLON) goto eat_colon; if (token->type == CPP_NAME && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)) { /* Get the name of the bitfield. */ declarator = make_id_declarator (NULL_TREE, cp_parser_identifier (parser), sfk_none); eat_colon: cp_lexer_consume_token (parser->lexer); /* Eat ':'. */ /* Get the width of the bitfield. */ width = cp_parser_constant_expression (parser, /*allow_non_constant=*/false, NULL); } else { /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, &ctor_dtor_or_conv_p, /*parenthesized_p=*/NULL, /*member_p=*/false); } /* Look for attributes that apply to the ivar. */ attributes = cp_parser_attributes_opt (parser); /* Remember which attributes are prefix attributes and which are not. */ first_attribute = attributes; /* Combine the attributes. */ attributes = chainon (prefix_attributes, attributes); if (width) /* Create the bitfield declaration. */ decl = grokbitfield (declarator, &declspecs, width, attributes); else decl = grokfield (declarator, &declspecs, NULL_TREE, /*init_const_expr_p=*/false, NULL_TREE, attributes); /* Add the instance variable. */ if (decl != error_mark_node && decl != NULL_TREE) objc_add_instance_variable (decl); /* Reset PREFIX_ATTRIBUTES. */ while (attributes && TREE_CHAIN (attributes) != first_attribute) attributes = TREE_CHAIN (attributes); if (attributes) TREE_CHAIN (attributes) = NULL_TREE; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_COMMA) { cp_lexer_consume_token (parser->lexer); /* Eat ','. */ continue; } break; } cp_parser_consume_semicolon_at_end_of_statement (parser); token = cp_lexer_peek_token (parser->lexer); } if (token->keyword == RID_AT_END) cp_parser_error (parser, "expected %<}%>"); /* Do not consume the RID_AT_END, so it will be read again as terminating the @interface of @implementation. 
*/ if (token->keyword != RID_AT_END && token->type != CPP_EOF) cp_lexer_consume_token (parser->lexer); /* Eat '}'. */ /* For historical reasons, we accept an optional semicolon. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); } /* Parse an Objective-C protocol declaration. */ static void cp_parser_objc_protocol_declaration (cp_parser* parser, tree attributes) { tree proto, protorefs; cp_token *tok; cp_lexer_consume_token (parser->lexer); /* Eat '@protocol'. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)) { tok = cp_lexer_peek_token (parser->lexer); error_at (tok->location, "identifier expected after %<@protocol%>"); cp_parser_consume_semicolon_at_end_of_statement (parser); return; } /* See if we have a forward declaration or a definition. */ tok = cp_lexer_peek_nth_token (parser->lexer, 2); /* Try a forward declaration first. */ if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON) { while (true) { tree id; id = cp_parser_identifier (parser); if (id == error_mark_node) break; objc_declare_protocol (id, attributes); if(cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } cp_parser_consume_semicolon_at_end_of_statement (parser); } /* Ok, we got a full-fledged definition (or at least should). */ else { proto = cp_parser_identifier (parser); protorefs = cp_parser_objc_protocol_refs_opt (parser); objc_start_protocol (proto, protorefs, attributes); cp_parser_objc_method_prototype_list (parser); } } /* Parse an Objective-C superclass or category. */ static void cp_parser_objc_superclass_or_category (cp_parser *parser, bool iface_p, tree *super, tree *categ, bool *is_class_extension) { cp_token *next = cp_lexer_peek_token (parser->lexer); *super = *categ = NULL_TREE; *is_class_extension = false; if (next->type == CPP_COLON) { cp_lexer_consume_token (parser->lexer); /* Eat ':'. 
*/ *super = cp_parser_identifier (parser); } else if (next->type == CPP_OPEN_PAREN) { cp_lexer_consume_token (parser->lexer); /* Eat '('. */ /* If there is no category name, and this is an @interface, we have a class extension. */ if (iface_p && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)) { *categ = NULL_TREE; *is_class_extension = true; } else *categ = cp_parser_identifier (parser); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } } /* Parse an Objective-C class interface. */ static void cp_parser_objc_class_interface (cp_parser* parser, tree attributes) { tree name, super, categ, protos; bool is_class_extension; cp_lexer_consume_token (parser->lexer); /* Eat '@interface'. */ name = cp_parser_identifier (parser); if (name == error_mark_node) { /* It's hard to recover because even if valid @interface stuff is to follow, we can't compile it (or validate it) if we don't even know which class it refers to. Let's assume this was a stray '@interface' token in the stream and skip it. */ return; } cp_parser_objc_superclass_or_category (parser, true, &super, &categ, &is_class_extension); protos = cp_parser_objc_protocol_refs_opt (parser); /* We have either a class or a category on our hands. */ if (categ || is_class_extension) objc_start_category_interface (name, categ, protos, attributes); else { objc_start_class_interface (name, super, protos, attributes); /* Handle instance variable declarations, if any. */ cp_parser_objc_class_ivars (parser); objc_continue_interface (); } cp_parser_objc_method_prototype_list (parser); } /* Parse an Objective-C class implementation. */ static void cp_parser_objc_class_implementation (cp_parser* parser) { tree name, super, categ; bool is_class_extension; cp_lexer_consume_token (parser->lexer); /* Eat '@implementation'. 
*/ name = cp_parser_identifier (parser); if (name == error_mark_node) { /* It's hard to recover because even if valid @implementation stuff is to follow, we can't compile it (or validate it) if we don't even know which class it refers to. Let's assume this was a stray '@implementation' token in the stream and skip it. */ return; } cp_parser_objc_superclass_or_category (parser, false, &super, &categ, &is_class_extension); /* We have either a class or a category on our hands. */ if (categ) objc_start_category_implementation (name, categ); else { objc_start_class_implementation (name, super); /* Handle instance variable declarations, if any. */ cp_parser_objc_class_ivars (parser); objc_continue_implementation (); } cp_parser_objc_method_definition_list (parser); } /* Consume the @end token and finish off the implementation. */ static void cp_parser_objc_end_implementation (cp_parser* parser) { cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */ objc_finish_implementation (); } /* Parse an Objective-C declaration. */ static void cp_parser_objc_declaration (cp_parser* parser, tree attributes) { /* Try to figure out what kind of declaration is present. 
*/ cp_token *kwd = cp_lexer_peek_token (parser->lexer); if (attributes) switch (kwd->keyword) { case RID_AT_ALIAS: case RID_AT_CLASS: case RID_AT_END: error_at (kwd->location, "attributes may not be specified before" " the %<@%D%> Objective-C++ keyword", kwd->u.value); attributes = NULL; break; case RID_AT_IMPLEMENTATION: warning_at (kwd->location, OPT_Wattributes, "prefix attributes are ignored before %<@%D%>", kwd->u.value); attributes = NULL; default: break; } switch (kwd->keyword) { case RID_AT_ALIAS: cp_parser_objc_alias_declaration (parser); break; case RID_AT_CLASS: cp_parser_objc_class_declaration (parser); break; case RID_AT_PROTOCOL: cp_parser_objc_protocol_declaration (parser, attributes); break; case RID_AT_INTERFACE: cp_parser_objc_class_interface (parser, attributes); break; case RID_AT_IMPLEMENTATION: cp_parser_objc_class_implementation (parser); break; case RID_AT_END: cp_parser_objc_end_implementation (parser); break; default: error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct", kwd->u.value); cp_parser_skip_to_end_of_block_or_statement (parser); } } /* Parse an Objective-C try-catch-finally statement. objc-try-catch-finally-stmt: @try compound-statement objc-catch-clause-seq [opt] objc-finally-clause [opt] objc-catch-clause-seq: objc-catch-clause objc-catch-clause-seq [opt] objc-catch-clause: @catch ( objc-exception-declaration ) compound-statement objc-finally-clause: @finally compound-statement objc-exception-declaration: parameter-declaration '...' where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS. Returns NULL_TREE. PS: This function is identical to c_parser_objc_try_catch_finally_statement for C. Keep them in sync. 
*/

static tree
cp_parser_objc_try_catch_finally_statement (cp_parser *parser)
{
  location_t location;
  tree stmt;

  cp_parser_require_keyword (parser, RID_AT_TRY, RT_AT_TRY);
  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));

  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;

      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	seen_open_paren = true;
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_begin_catch_clause() knows that that means
	     '...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration.  */
	  parm = cp_parser_parameter_declaration (parser, false, NULL);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokdeclarator (parm->declarator,
						    &parm->decl_specifiers,
						    PARM, /*initialized=*/0,
						    /*attrlist=*/NULL);
	}
      if (seen_open_paren)
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made.  */

	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed
	     "@catch NSException *e)".  Parse the closing parenthesis
	     and keep going.
*/
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	    cp_lexer_consume_token (parser->lexer);

	  /* If there is no immediate closing parenthesis, the user
	     probably doesn't know that parentheses are required at all
	     (ie, they typed "@catch NSException *e").  So, just forget
	     about the closing parenthesis and keep going.  */
	}
      objc_begin_catch_clause (parameter_declaration);
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_finish_catch_clause ();
    }
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own
	 STATEMENT_LIST node, lest it get absorbed into the surrounding
	 block.  */
      stmt = push_stmt_list ();
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }

  return objc_finish_try_stmt ();
}

/* Parse an Objective-C synchronized statement.

   objc-synchronized-stmt:
     @synchronized ( expression ) compound-statement

   Returns NULL_TREE.  */

static tree
cp_parser_objc_synchronized_statement (cp_parser *parser)
{
  location_t location;
  tree lock, stmt;

  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, RT_AT_SYNCHRONIZED);

  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  lock = cp_parser_expression (parser, false, NULL);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* NB: The @synchronized block needs to be wrapped in its own
     STATEMENT_LIST node, lest it get absorbed into the surrounding
     block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);

  return objc_build_synchronized (location, lock, pop_stmt_list (stmt));
}

/* Parse an Objective-C throw statement.

   objc-throw-stmt:
     @throw assignment-expression [opt] ;

   Returns a constructed '@throw' statement.
*/ static tree cp_parser_objc_throw_statement (cp_parser *parser) { tree expr = NULL_TREE; location_t loc = cp_lexer_peek_token (parser->lexer)->location; cp_parser_require_keyword (parser, RID_AT_THROW, RT_AT_THROW); if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) expr = cp_parser_expression (parser, /*cast_p=*/false, NULL); cp_parser_consume_semicolon_at_end_of_statement (parser); return objc_build_throw_stmt (loc, expr); } /* Parse an Objective-C statement. */ static tree cp_parser_objc_statement (cp_parser * parser) { /* Try to figure out what kind of declaration is present. */ cp_token *kwd = cp_lexer_peek_token (parser->lexer); switch (kwd->keyword) { case RID_AT_TRY: return cp_parser_objc_try_catch_finally_statement (parser); case RID_AT_SYNCHRONIZED: return cp_parser_objc_synchronized_statement (parser); case RID_AT_THROW: return cp_parser_objc_throw_statement (parser); default: error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct", kwd->u.value); cp_parser_skip_to_end_of_block_or_statement (parser); } return error_mark_node; } /* If we are compiling ObjC++ and we see an __attribute__ we neeed to look ahead to see if an objc keyword follows the attributes. This is to detect the use of prefix attributes on ObjC @interface and @protocol. */ static bool cp_parser_objc_valid_prefix_attributes (cp_parser* parser, tree *attrib) { cp_lexer_save_tokens (parser->lexer); *attrib = cp_parser_attributes_opt (parser); gcc_assert (*attrib); if (OBJC_IS_AT_KEYWORD (cp_lexer_peek_token (parser->lexer)->keyword)) { cp_lexer_commit_tokens (parser->lexer); return true; } cp_lexer_rollback_tokens (parser->lexer); return false; } /* This routine is a minimal replacement for c_parser_struct_declaration () used when parsing the list of types/names or ObjC++ properties. For example, when parsing the code @property (readonly) int a, b, c; this function is responsible for parsing "int a, int b, int c" and returning the declarations as CHAIN of DECLs. 
TODO: Share this code with cp_parser_objc_class_ivars. It's very similar parsing. */ static tree cp_parser_objc_struct_declaration (cp_parser *parser) { tree decls = NULL_TREE; cp_decl_specifier_seq declspecs; int decl_class_or_enum_p; tree prefix_attributes; cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_NONE, &declspecs, &decl_class_or_enum_p); if (declspecs.type == error_mark_node) return error_mark_node; /* auto, register, static, extern, mutable. */ if (declspecs.storage_class != sc_none) { cp_parser_error (parser, "invalid type for property"); declspecs.storage_class = sc_none; } /* __thread. */ if (declspecs.specs[(int) ds_thread]) { cp_parser_error (parser, "invalid type for property"); declspecs.specs[(int) ds_thread] = 0; } /* typedef. */ if (declspecs.specs[(int) ds_typedef]) { cp_parser_error (parser, "invalid type for property"); declspecs.specs[(int) ds_typedef] = 0; } prefix_attributes = declspecs.attributes; declspecs.attributes = NULL_TREE; /* Keep going until we hit the `;' at the end of the declaration. */ while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { tree attributes, first_attribute, decl; cp_declarator *declarator; cp_token *token; /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, NULL, NULL, false); /* Look for attributes that apply to the ivar. */ attributes = cp_parser_attributes_opt (parser); /* Remember which attributes are prefix attributes and which are not. */ first_attribute = attributes; /* Combine the attributes. */ attributes = chainon (prefix_attributes, attributes); decl = grokfield (declarator, &declspecs, NULL_TREE, /*init_const_expr_p=*/false, NULL_TREE, attributes); if (decl == error_mark_node || decl == NULL_TREE) return error_mark_node; /* Reset PREFIX_ATTRIBUTES. 
*/ while (attributes && TREE_CHAIN (attributes) != first_attribute) attributes = TREE_CHAIN (attributes); if (attributes) TREE_CHAIN (attributes) = NULL_TREE; DECL_CHAIN (decl) = decls; decls = decl; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_COMMA) { cp_lexer_consume_token (parser->lexer); /* Eat ','. */ continue; } else break; } return decls; } /* Parse an Objective-C @property declaration. The syntax is: objc-property-declaration: '@property' objc-property-attributes[opt] struct-declaration ; objc-property-attributes: '(' objc-property-attribute-list ')' objc-property-attribute-list: objc-property-attribute objc-property-attribute-list, objc-property-attribute objc-property-attribute 'getter' = identifier 'setter' = identifier 'readonly' 'readwrite' 'assign' 'retain' 'copy' 'nonatomic' For example: @property NSString *name; @property (readonly) id object; @property (retain, nonatomic, getter=getTheName) id name; @property int a, b, c; PS: This function is identical to c_parser_objc_at_property_declaration for C. Keep them in sync. */ static void cp_parser_objc_at_property_declaration (cp_parser *parser) { /* The following variables hold the attributes of the properties as parsed. They are 'false' or 'NULL_TREE' if the attribute was not seen. When we see an attribute, we set them to 'true' (if they are boolean properties) or to the identifier (if they have an argument, ie, for getter and setter). Note that here we only parse the list of attributes, check the syntax and accumulate the attributes that we find. objc_add_property_declaration() will then process the information. */ bool property_assign = false; bool property_copy = false; tree property_getter_ident = NULL_TREE; bool property_nonatomic = false; bool property_readonly = false; bool property_readwrite = false; bool property_retain = false; tree property_setter_ident = NULL_TREE; /* 'properties' is the list of properties that we read. 
Usually a single one, but maybe more (eg, in "@property int a, b, c;" there are three). */ tree properties; location_t loc; loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* Eat '@property'. */ /* Parse the optional attribute list... */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { /* Eat the '('. */ cp_lexer_consume_token (parser->lexer); while (true) { bool syntax_error = false; cp_token *token = cp_lexer_peek_token (parser->lexer); enum rid keyword; if (token->type != CPP_NAME) { cp_parser_error (parser, "expected identifier"); break; } keyword = C_RID_CODE (token->u.value); cp_lexer_consume_token (parser->lexer); switch (keyword) { case RID_ASSIGN: property_assign = true; break; case RID_COPY: property_copy = true; break; case RID_NONATOMIC: property_nonatomic = true; break; case RID_READONLY: property_readonly = true; break; case RID_READWRITE: property_readwrite = true; break; case RID_RETAIN: property_retain = true; break; case RID_GETTER: case RID_SETTER: if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)) { if (keyword == RID_GETTER) cp_parser_error (parser, "missing %<=%> (after %<getter%> attribute)"); else cp_parser_error (parser, "missing %<=%> (after %<setter%> attribute)"); syntax_error = true; break; } cp_lexer_consume_token (parser->lexer); /* eat the = */ if (!cp_parser_objc_selector_p (cp_lexer_peek_token (parser->lexer)->type)) { cp_parser_error (parser, "expected identifier"); syntax_error = true; break; } if (keyword == RID_SETTER) { if (property_setter_ident != NULL_TREE) { cp_parser_error (parser, "the %<setter%> attribute may only be specified once"); cp_lexer_consume_token (parser->lexer); } else property_setter_ident = cp_parser_objc_selector (parser); if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)) cp_parser_error (parser, "setter name must terminate with %<:%>"); else cp_lexer_consume_token (parser->lexer); } else { if (property_getter_ident != NULL_TREE) { 
cp_parser_error (parser, "the %<getter%> attribute may only be specified once"); cp_lexer_consume_token (parser->lexer); } else property_getter_ident = cp_parser_objc_selector (parser); } break; default: cp_parser_error (parser, "unknown property attribute"); syntax_error = true; break; } if (syntax_error) break; if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } /* FIXME: "@property (setter, assign);" will generate a spurious "error: expected ‘)’ before ‘,’ token". This is because cp_parser_require, unlike the C counterpart, will produce an error even if we are in error recovery. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); } } /* ... and the property declaration(s). */ properties = cp_parser_objc_struct_declaration (parser); if (properties == error_mark_node) { cp_parser_skip_to_end_of_statement (parser); /* If the next token is now a `;', consume it. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); return; } if (properties == NULL_TREE) cp_parser_error (parser, "expected identifier"); else { /* Comma-separated properties are chained together in reverse order; add them one by one. */ properties = nreverse (properties); for (; properties; properties = TREE_CHAIN (properties)) objc_add_property_declaration (loc, copy_node (properties), property_readonly, property_readwrite, property_assign, property_retain, property_copy, property_nonatomic, property_getter_ident, property_setter_ident); } cp_parser_consume_semicolon_at_end_of_statement (parser); } /* Parse an Objective-C++ @synthesize declaration. 
The syntax is: objc-synthesize-declaration: @synthesize objc-synthesize-identifier-list ; objc-synthesize-identifier-list: objc-synthesize-identifier objc-synthesize-identifier-list, objc-synthesize-identifier objc-synthesize-identifier identifier identifier = identifier For example: @synthesize MyProperty; @synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty; PS: This function is identical to c_parser_objc_at_synthesize_declaration for C. Keep them in sync. */ static void cp_parser_objc_at_synthesize_declaration (cp_parser *parser) { tree list = NULL_TREE; location_t loc; loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* Eat '@synthesize'. */ while (true) { tree property, ivar; property = cp_parser_identifier (parser); if (property == error_mark_node) { cp_parser_consume_semicolon_at_end_of_statement (parser); return; } if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { cp_lexer_consume_token (parser->lexer); ivar = cp_parser_identifier (parser); if (ivar == error_mark_node) { cp_parser_consume_semicolon_at_end_of_statement (parser); return; } } else ivar = NULL_TREE; list = chainon (list, build_tree_list (ivar, property)); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } cp_parser_consume_semicolon_at_end_of_statement (parser); objc_add_synthesize_declaration (loc, list); } /* Parse an Objective-C++ @dynamic declaration. The syntax is: objc-dynamic-declaration: @dynamic identifier-list ; For example: @dynamic MyProperty; @dynamic MyProperty, AnotherProperty; PS: This function is identical to c_parser_objc_at_dynamic_declaration for C. Keep them in sync. */ static void cp_parser_objc_at_dynamic_declaration (cp_parser *parser) { tree list = NULL_TREE; location_t loc; loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* Eat '@dynamic'. 
*/ while (true) { tree property; property = cp_parser_identifier (parser); if (property == error_mark_node) { cp_parser_consume_semicolon_at_end_of_statement (parser); return; } list = chainon (list, build_tree_list (NULL, property)); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } cp_parser_consume_semicolon_at_end_of_statement (parser); objc_add_dynamic_declaration (loc, list); } /* OpenMP 2.5 parsing routines. */ /* Returns name of the next clause. If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and the token is not consumed. Otherwise appropriate pragma_omp_clause is returned and the token is consumed. */ static pragma_omp_clause cp_parser_omp_clause_name (cp_parser *parser) { pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE; if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF)) result = PRAGMA_OMP_CLAUSE_IF; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT)) result = PRAGMA_OMP_CLAUSE_DEFAULT; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE)) result = PRAGMA_OMP_CLAUSE_PRIVATE; else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'c': if (!strcmp ("collapse", p)) result = PRAGMA_OMP_CLAUSE_COLLAPSE; else if (!strcmp ("copyin", p)) result = PRAGMA_OMP_CLAUSE_COPYIN; else if (!strcmp ("copyprivate", p)) result = PRAGMA_OMP_CLAUSE_COPYPRIVATE; break; case 'f': if (!strcmp ("final", p)) result = PRAGMA_OMP_CLAUSE_FINAL; else if (!strcmp ("firstprivate", p)) result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE; break; case 'l': if (!strcmp ("lastprivate", p)) result = PRAGMA_OMP_CLAUSE_LASTPRIVATE; break; case 'm': if (!strcmp ("mergeable", p)) result = PRAGMA_OMP_CLAUSE_MERGEABLE; break; case 'n': if (!strcmp ("nowait", p)) result = PRAGMA_OMP_CLAUSE_NOWAIT; else if (!strcmp ("num_threads", p)) result = 
PRAGMA_OMP_CLAUSE_NUM_THREADS; break; case 'o': if (!strcmp ("ordered", p)) result = PRAGMA_OMP_CLAUSE_ORDERED; break; case 'r': if (!strcmp ("reduction", p)) result = PRAGMA_OMP_CLAUSE_REDUCTION; break; case 's': if (!strcmp ("schedule", p)) result = PRAGMA_OMP_CLAUSE_SCHEDULE; else if (!strcmp ("shared", p)) result = PRAGMA_OMP_CLAUSE_SHARED; break; case 'u': if (!strcmp ("untied", p)) result = PRAGMA_OMP_CLAUSE_UNTIED; break; } } if (result != PRAGMA_OMP_CLAUSE_NONE) cp_lexer_consume_token (parser->lexer); return result; } /* Validate that a clause of the given type does not already exist. */ static void check_no_duplicate_clause (tree clauses, enum omp_clause_code code, const char *name, location_t location) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == code) { error_at (location, "too many %qs clauses", name); break; } } /* OpenMP 2.5: variable-list: identifier variable-list , identifier In addition, we match a closing parenthesis. An opening parenthesis will have been consumed by the caller. If KIND is nonzero, create the appropriate node and install the decl in OMP_CLAUSE_DECL and add the node to the head of the list. If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE; return the list created. 
*/

static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
				tree list)
{
  cp_token *token;
  while (1)
    {
      tree name, decl;

      token = cp_lexer_peek_token (parser->lexer);
      name = cp_parser_id_expression (parser, /*template_keyword_p=*/false,
				      /*check_dependency_p=*/true,
				      /*template_p=*/NULL,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
      if (name == error_mark_node)
	goto skip_comma;

      decl = cp_parser_lookup_name_simple (parser, name, token->location);
      if (decl == error_mark_node)
	cp_parser_name_lookup_error (parser, name, decl, NLE_NULL,
				     token->location);
      else if (kind != 0)
	{
	  /* Nonzero KIND: wrap the decl in a clause node of that kind
	     and push it on the front of LIST.  */
	  tree u = build_omp_clause (token->location, kind);
	  OMP_CLAUSE_DECL (u) = decl;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	/* KIND == 0: accumulate the decls in a TREE_LIST instead, with
	   the decl in TREE_PURPOSE.  */
	list = tree_cons (decl, NULL_TREE, list);

    get_comma:
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

      /* Try to resync to an unnested comma.  Copied from
	 cp_parser_parenthesized_expression_list.  */
    skip_comma:
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
    }

  return list;
}

/* Similarly, but expect leading and trailing parenthesis.  This is a very
   common case for omp clauses.
*/ static tree cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list) { if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return cp_parser_omp_var_list_no_open (parser, kind, list); return list; } /* OpenMP 3.0: collapse ( constant-expression ) */ static tree cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location) { tree c, num; location_t loc; HOST_WIDE_INT n; loc = cp_lexer_peek_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; num = cp_parser_constant_expression (parser, false, NULL); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (num == error_mark_node) return list; num = fold_non_dependent_expr (num); if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) || !host_integerp (num, 0) || (n = tree_low_cst (num, 0)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse", location); c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_COLLAPSE_EXPR (c) = num; return c; } /* OpenMP 2.5: default ( shared | none ) */ static tree cp_parser_omp_clause_default (cp_parser *parser, tree list, location_t location) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; tree c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'n': if (strcmp ("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 's': if (strcmp ("shared", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } 
cp_lexer_consume_token (parser->lexer); } else { invalid_kind: cp_parser_error (parser, "expected %<none%> or %<shared%>"); } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default", location); c = build_omp_clause (location, OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_DEFAULT_KIND (c) = kind; return c; } /* OpenMP 3.1: final ( expression ) */ static tree cp_parser_omp_clause_final (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final", location); c = build_omp_clause (location, OMP_CLAUSE_FINAL); OMP_CLAUSE_FINAL_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: if ( expression ) */ static tree cp_parser_omp_clause_if (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if", location); c = build_omp_clause (location, OMP_CLAUSE_IF); OMP_CLAUSE_IF_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 3.1: mergeable */ static tree cp_parser_omp_clause_mergeable (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree 
c; check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable", location); c = build_omp_clause (location, OMP_CLAUSE_MERGEABLE); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: nowait */ static tree cp_parser_omp_clause_nowait (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait", location); c = build_omp_clause (location, OMP_CLAUSE_NOWAIT); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: num_threads ( expression ) */ static tree cp_parser_omp_clause_num_threads (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_expression (parser, false, NULL); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads", location); c = build_omp_clause (location, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: ordered */ static tree cp_parser_omp_clause_ordered (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered", location); c = build_omp_clause (location, OMP_CLAUSE_ORDERED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: reduction ( reduction-operator : variable-list ) reduction-operator: One of: + * - & ^ | && || OpenMP 3.1: reduction-operator: One of: + * - & ^ | && || min max */ static tree cp_parser_omp_clause_reduction (cp_parser *parser, tree list) { enum tree_code code; tree nlist, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; switch (cp_lexer_peek_token (parser->lexer)->type) { case CPP_PLUS: code = PLUS_EXPR; break; case CPP_MULT: code = MULT_EXPR; 
break; case CPP_MINUS: code = MINUS_EXPR; break; case CPP_AND: code = BIT_AND_EXPR; break; case CPP_XOR: code = BIT_XOR_EXPR; break; case CPP_OR: code = BIT_IOR_EXPR; break; case CPP_AND_AND: code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: code = TRUTH_ORIF_EXPR; break; case CPP_NAME: { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "min") == 0) { code = MIN_EXPR; break; } if (strcmp (p, "max") == 0) { code = MAX_EXPR; break; } } /* FALLTHROUGH */ default: cp_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, %<^%>, " "%<|%>, %<&&%>, %<||%>, %<min%> or %<max%>"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) goto resync_fail; nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list); for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_REDUCTION_CODE (c) = code; return nlist; } /* OpenMP 2.5: schedule ( schedule-kind ) schedule ( schedule-kind , expression ) schedule-kind: static | dynamic | guided | runtime | auto */ static tree cp_parser_omp_clause_schedule (cp_parser *parser, tree list, location_t location) { tree c, t; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; c = build_omp_clause (location, OMP_CLAUSE_SCHEDULE); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'd': if (strcmp ("dynamic", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC; break; case 'g': if (strcmp ("guided", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED; break; case 'r': if (strcmp ("runtime", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = 
OMP_CLAUSE_SCHEDULE_RUNTIME; break; default: goto invalid_kind; } } else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO; else goto invalid_kind; cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_token *token; cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); t = cp_parser_assignment_expression (parser, false, NULL); if (t == error_mark_node) goto resync_fail; else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME) error_at (token->location, "schedule %<runtime%> does not take " "a %<chunk_size%> parameter"); else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO) error_at (token->location, "schedule %<auto%> does not take " "a %<chunk_size%> parameter"); else OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) goto resync_fail; } else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN)) goto resync_fail; check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule", location); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: cp_parser_error (parser, "invalid schedule kind"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } /* OpenMP 3.0: untied */ static tree cp_parser_omp_clause_untied (cp_parser *parser ATTRIBUTE_UNUSED, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied", location); c = build_omp_clause (location, OMP_CLAUSE_UNTIED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* Parse all OpenMP clauses. The set clauses allowed by the directive is a bitmask in MASK. 
Return the list of clauses found; the result of clause default goes in *pdefault. */ static tree cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask, const char *where, cp_token *pragma_tok) { tree clauses = NULL; bool first = true; cp_token *token = NULL; while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)) { pragma_omp_clause c_kind; const char *c_name; tree prev = clauses; if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); c_kind = cp_parser_omp_clause_name (parser); first = false; switch (c_kind) { case PRAGMA_OMP_CLAUSE_COLLAPSE: clauses = cp_parser_omp_clause_collapse (parser, clauses, token->location); c_name = "collapse"; break; case PRAGMA_OMP_CLAUSE_COPYIN: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses); c_name = "copyin"; break; case PRAGMA_OMP_CLAUSE_COPYPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE, clauses); c_name = "copyprivate"; break; case PRAGMA_OMP_CLAUSE_DEFAULT: clauses = cp_parser_omp_clause_default (parser, clauses, token->location); c_name = "default"; break; case PRAGMA_OMP_CLAUSE_FINAL: clauses = cp_parser_omp_clause_final (parser, clauses, token->location); c_name = "final"; break; case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE, clauses); c_name = "firstprivate"; break; case PRAGMA_OMP_CLAUSE_IF: clauses = cp_parser_omp_clause_if (parser, clauses, token->location); c_name = "if"; break; case PRAGMA_OMP_CLAUSE_LASTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE, clauses); c_name = "lastprivate"; break; case PRAGMA_OMP_CLAUSE_MERGEABLE: clauses = cp_parser_omp_clause_mergeable (parser, clauses, token->location); c_name = "mergeable"; break; case PRAGMA_OMP_CLAUSE_NOWAIT: clauses = cp_parser_omp_clause_nowait (parser, clauses, token->location); c_name = "nowait"; break; case 
PRAGMA_OMP_CLAUSE_NUM_THREADS: clauses = cp_parser_omp_clause_num_threads (parser, clauses, token->location); c_name = "num_threads"; break; case PRAGMA_OMP_CLAUSE_ORDERED: clauses = cp_parser_omp_clause_ordered (parser, clauses, token->location); c_name = "ordered"; break; case PRAGMA_OMP_CLAUSE_PRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE, clauses); c_name = "private"; break; case PRAGMA_OMP_CLAUSE_REDUCTION: clauses = cp_parser_omp_clause_reduction (parser, clauses); c_name = "reduction"; break; case PRAGMA_OMP_CLAUSE_SCHEDULE: clauses = cp_parser_omp_clause_schedule (parser, clauses, token->location); c_name = "schedule"; break; case PRAGMA_OMP_CLAUSE_SHARED: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED, clauses); c_name = "shared"; break; case PRAGMA_OMP_CLAUSE_UNTIED: clauses = cp_parser_omp_clause_untied (parser, clauses, token->location); c_name = "nowait"; break; default: cp_parser_error (parser, "expected %<#pragma omp%> clause"); goto saw_error; } if (((mask >> c_kind) & 1) == 0) { /* Remove the invalid clause(s) from the list to avoid confusing the rest of the compiler. */ clauses = prev; error_at (token->location, "%qs is not valid for %qs", c_name, where); } } saw_error: cp_parser_skip_to_pragma_eol (parser, pragma_tok); return finish_omp_clauses (clauses); } /* OpenMP 2.5: structured-block: statement In practice, we're also interested in adding the statement to an outer node. So it is convenient if we work around the fact that cp_parser_statement calls add_stmt. */ static unsigned cp_parser_begin_omp_structured_block (cp_parser *parser) { unsigned save = parser->in_statement; /* Only move the values to IN_OMP_BLOCK if they weren't false. 
This preserves the "not within loop or switch" style error messages for nonsense cases like void foo() { #pragma omp single break; } */ if (parser->in_statement) parser->in_statement = IN_OMP_BLOCK; return save; } static void cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save) { parser->in_statement = save; } static tree cp_parser_omp_structured_block (cp_parser *parser) { tree stmt = begin_omp_structured_block (); unsigned int save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); return finish_omp_structured_block (stmt); } /* OpenMP 2.5: # pragma omp atomic new-line expression-stmt expression-stmt: x binop= expr | x++ | ++x | x-- | --x binop: +, *, -, /, &, ^, |, <<, >> where x is an lvalue expression with scalar type. OpenMP 3.1: # pragma omp atomic new-line update-stmt # pragma omp atomic read new-line read-stmt # pragma omp atomic write new-line write-stmt # pragma omp atomic update new-line update-stmt # pragma omp atomic capture new-line capture-stmt # pragma omp atomic capture new-line capture-block read-stmt: v = x write-stmt: x = expr update-stmt: expression-stmt | x = x binop expr capture-stmt: v = x binop= expr | v = x++ | v = ++x | v = x-- | v = --x capture-block: { v = x; update-stmt; } | { update-stmt; v = x; } where x and v are lvalue expressions with scalar type. 
*/

/* Parse the statement(s) following an OpenMP atomic directive and hand
   the pieces to finish_omp_atomic.  CODE distinguishes the 3.1 forms
   (read/write/update/capture); OPCODE is the arithmetic operation for
   update forms.  The parse may restart once (see the CPP_SEMICOLON case)
   when a capture block turns out to be of the "old value" shape.  */

static void
cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok)
{
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE, lhs1 = NULL_TREE;
  tree rhs1 = NULL_TREE, orig_lhs;
  enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR;
  bool structured_block = false;

  /* An optional clause name (read/write/update/capture) selects the
     OpenMP 3.1 form; no name means the classic update form.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (!strcmp (p, "read"))
	code = OMP_ATOMIC_READ;
      else if (!strcmp (p, "write"))
	code = NOP_EXPR;
      else if (!strcmp (p, "update"))
	code = OMP_ATOMIC;
      else if (!strcmp (p, "capture"))
	code = OMP_ATOMIC_CAPTURE_NEW;
      else
	p = NULL;
      if (p)
	cp_lexer_consume_token (parser->lexer);
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      if (code == NOP_EXPR)
	lhs = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      else
	lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
      if (lhs == error_mark_node)
	goto saw_error;
      if (code == NOP_EXPR)
	{
	  /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
	     opcode.  */
	  code = OMP_ATOMIC;
	  rhs = lhs;
	  lhs = v;
	  v = NULL_TREE;
	}
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      /* A capture may be either "{ stmt; stmt; }" or "v = update".  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	{
	  cp_lexer_consume_token (parser->lexer);
	  structured_block = true;
	}
      else
	{
	  v = cp_parser_unary_expression (parser, /*address_p=*/false,
					  /*cast_p=*/false, NULL);
	  if (v == error_mark_node)
	    goto saw_error;
	  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	    goto saw_error;
	}
    default:
      break;
    }

restart:
  lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false, NULL);
  orig_lhs = lhs;
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
      goto saw_error;

    case POSTINCREMENT_EXPR:
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;

    case POSTDECREMENT_EXPR:
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;

    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0))
	     == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					 (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      /* Otherwise the next token must be a compound-assignment operator
	 or, for "x = x binop expr" forms, plain '='.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_MULT_EQ:
	  opcode = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  opcode = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  opcode = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  opcode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  opcode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  opcode = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  opcode = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  opcode = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  opcode = BIT_XOR_EXPR;
	  break;
	case CPP_EQ:
	  if (structured_block || code == OMP_ATOMIC)
	    {
	      enum cp_parser_prec oprec;
	      cp_token *token;
	      cp_lexer_consume_token (parser->lexer);
	      rhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
						 /*cast_p=*/false, NULL);
	      if (rhs1 == error_mark_node)
		goto saw_error;
	      token = cp_lexer_peek_token (parser->lexer);
	      switch (token->type)
		{
		case CPP_SEMICOLON:
		  if (code == OMP_ATOMIC_CAPTURE_NEW)
		    {
		      /* "v = x;" inside a capture block: reinterpret as
			 the capture-old form and parse the update
			 statement from scratch.  */
		      code = OMP_ATOMIC_CAPTURE_OLD;
		      v = lhs;
		      lhs = NULL_TREE;
		      lhs1 = rhs1;
		      rhs1 = NULL_TREE;
		      cp_lexer_consume_token (parser->lexer);
		      goto restart;
		    }
		  cp_parser_error (parser,
				   "invalid form of %<#pragma omp atomic%>");
		  goto saw_error;
		case CPP_MULT:
		  opcode = MULT_EXPR;
		  break;
		case CPP_DIV:
		  opcode = TRUNC_DIV_EXPR;
		  break;
		case CPP_PLUS:
		  opcode = PLUS_EXPR;
		  break;
		case CPP_MINUS:
		  opcode = MINUS_EXPR;
		  break;
		case CPP_LSHIFT:
		  opcode = LSHIFT_EXPR;
		  break;
		case CPP_RSHIFT:
		  opcode = RSHIFT_EXPR;
		  break;
		case CPP_AND:
		  opcode = BIT_AND_EXPR;
		  break;
		case CPP_OR:
		  opcode = BIT_IOR_EXPR;
		  break;
		case CPP_XOR:
		  opcode = BIT_XOR_EXPR;
		  break;
		default:
		  cp_parser_error (parser,
				   "invalid operator for %<#pragma omp atomic%>");
		  goto saw_error;
		}
	      /* Parse the remainder of the RHS at a precedence that
		 excludes another application of a commutative operator,
		 so "x = x + a + b" is rejected.  */
	      oprec = TOKEN_PRECEDENCE (token);
	      gcc_assert (oprec != PREC_NOT_OPERATOR);
	      if (commutative_tree_code (opcode))
		oprec = (enum cp_parser_prec) (oprec - 1);
	      cp_lexer_consume_token (parser->lexer);
	      rhs = cp_parser_binary_expression (parser, false, false,
						 oprec, NULL);
	      if (rhs == error_mark_node)
		goto saw_error;
	      goto stmt_done;
	    }
	  /* FALLTHROUGH */
	default:
	  cp_parser_error (parser,
			   "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      cp_lexer_consume_token (parser->lexer);

      rhs = cp_parser_expression (parser, false, NULL);
      if (rhs == error_mark_node)
	goto saw_error;
      break;
    }
stmt_done:
  /* For a "{ update; v = x; }" capture block, parse the trailing
     "v = x" now.  */
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
	goto saw_error;
      v = cp_parser_unary_expression (parser, /*address_p=*/false,
				      /*cast_p=*/false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
	goto saw_error;
      lhs1 = cp_parser_unary_expression (parser, /*address_p=*/false,
					 /*cast_p=*/false, NULL);
      if (lhs1 == error_mark_node)
	goto saw_error;
    }
  if (structured_block)
    {
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
done:
  finish_omp_atomic (code, opcode, lhs, rhs, v, lhs1, rhs1);
  if (!structured_block)
    cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;

 saw_error:
  cp_parser_skip_to_end_of_block_or_statement (parser);
  if (structured_block)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	cp_lexer_consume_token (parser->lexer);
      else if (code == OMP_ATOMIC_CAPTURE_NEW)
	{
	  cp_parser_skip_to_end_of_block_or_statement (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	    cp_lexer_consume_token (parser->lexer);
	}
    }
}

/* OpenMP 2.5:
   # pragma omp barrier new-line  */

static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}

/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block  */

static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, name = NULL;

  /* The critical region name is optional.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      name = cp_parser_identifier (parser);

      if (name == error_mark_node
	  || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
      if (name == error_mark_node)
	name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (input_location, stmt, name);
}

/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list )  */

static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  /* The variable list is parsed for syntax only; flush applies to
     everything regardless.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_flush ();
}

/* Helper function, to parse the omp for condition expression.  Only
   relational operators <, <=, > and >= are accepted; anything else
   yields error_mark_node.  */

static tree
cp_parser_omp_for_cond (cp_parser *parser, tree decl)
{
  tree cond = cp_parser_binary_expression (parser, false, true,
					   PREC_NOT_OPERATOR, NULL);
  if (cond == error_mark_node
      || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;
    default:
      return error_mark_node;
    }

  /* If decl is an iterator, preserve LHS and RHS of the relational
     expr until finish_omp_for.  */
  if (decl
      && (type_dependent_expression_p (decl)
	  || CLASS_TYPE_P (TREE_TYPE (decl))))
    return cond;

  return build_x_binary_op (TREE_CODE (cond),
			    TREE_OPERAND (cond, 0), ERROR_MARK,
			    TREE_OPERAND (cond, 1), ERROR_MARK,
			    /*overload=*/NULL, tf_warning_or_error);
}

/* Helper function, to parse omp for increment expression.
*/

/* Parse the increment expression of an OpenMP for loop when DECL is a
   class-type or type-dependent iteration variable.  Accepts ++/-- in
   either position and the "decl = decl +/- expr" / "decl op= expr"
   forms; anything else returns error_mark_node.  */

static tree
cp_parser_omp_for_incr (cp_parser *parser, tree decl)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum tree_code op;
  tree lhs, rhs;
  cp_id_kind idk;
  bool decl_first;

  /* Pre-increment / pre-decrement.  */
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
	    ? PREINCREMENT_EXPR : PREDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      lhs = cp_parser_cast_expression (parser, false, false, NULL);
      if (lhs != decl)
	return error_mark_node;
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  lhs = cp_parser_primary_expression (parser, false, false, false, &idk);
  if (lhs != decl)
    return error_mark_node;

  /* Post-increment / post-decrement.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      op = (token->type == CPP_PLUS_PLUS
	    ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }

  op = cp_parser_assignment_operator_opt (parser);
  if (op == ERROR_MARK)
    return error_mark_node;

  /* Compound assignment: decl op= expr.  */
  if (op != NOP_EXPR)
    {
      rhs = cp_parser_assignment_expression (parser, false, NULL);
      rhs = build2 (op, TREE_TYPE (decl), decl, rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
    }

  /* Plain assignment: decl = decl + expr, decl = expr + decl, etc.
     Build up the non-decl side of the sum in LHS.  */
  lhs = cp_parser_binary_expression (parser, false, false,
				     PREC_ADDITIVE_EXPRESSION, NULL);
  token = cp_lexer_peek_token (parser->lexer);
  decl_first = lhs == decl;
  if (decl_first)
    lhs = NULL_TREE;
  if (token->type != CPP_PLUS
      && token->type != CPP_MINUS)
    return error_mark_node;

  do
    {
      op = token->type == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR;
      cp_lexer_consume_token (parser->lexer);
      rhs = cp_parser_binary_expression (parser, false, false,
					 PREC_ADDITIVE_EXPRESSION, NULL);
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first)
	{
	  if (lhs == NULL_TREE)
	    {
	      if (op == PLUS_EXPR)
		lhs = rhs;
	      else
		lhs = build_x_unary_op (NEGATE_EXPR, rhs,
					tf_warning_or_error);
	    }
	  else
	    lhs = build_x_binary_op (op, lhs, ERROR_MARK, rhs, ERROR_MARK,
				     NULL, tf_warning_or_error);
	}
    }
  while (token->type == CPP_PLUS || token->type == CPP_MINUS);

  if (!decl_first)
    {
      /* "decl = expr + decl": the final term must be decl itself and
	 must be added, not subtracted.  */
      if (rhs != decl || op == MINUS_EXPR)
	return error_mark_node;
      rhs = build2 (op, TREE_TYPE (decl), lhs, decl);
    }
  else
    rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs);

  return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
}

/* Parse the restricted form of the for statement allowed by OpenMP.
   CLAUSES are the directive's clauses (used for collapse and for the
   private/lastprivate bookkeeping on iteration variables); PAR_CLAUSES,
   when non-NULL, points at the clause chain of an enclosing combined
   parallel construct.  */

static tree
cp_parser_omp_for_loop (cp_parser *parser, tree clauses, tree *par_clauses)
{
  tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret;
  tree real_decl, initv, condv, incrv, declv;
  tree this_pre_body, cl;
  location_t loc_first;
  bool collapse_err = false;
  int i, collapse = 1, nbraces = 0;
  VEC(tree,gc) *for_block = make_tree_vector ();

  /* The collapse clause determines how many nested for loops we must
     consume.  */
  for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
    if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
      collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0);
  gcc_assert (collapse >= 1);

  declv = make_tree_vec (collapse);
  initv = make_tree_vec (collapse);
  condv = make_tree_vec (collapse);
  incrv = make_tree_vec (collapse);

  loc_first = cp_lexer_peek_token (parser->lexer)->location;

  for (i = 0; i < collapse; i++)
    {
      int bracecount = 0;
      bool add_private_clause = false;
      location_t loc;

      if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
	{
	  cp_parser_error (parser, "for statement expected");
	  return NULL;
	}
      loc = cp_lexer_consume_token (parser->lexer)->location;

      if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	return NULL;

      init = decl = real_decl = NULL;
      this_pre_body = push_stmt_list ();
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  /* See 2.5.1 (in OpenMP 3.0, similar wording is in 2.5
	     standard too):

	     init-expr:
			var = lb
			integer-type var = lb
			random-access-iterator-type var = lb
			pointer-type var = lb  */
	  cp_decl_specifier_seq type_specifiers;

	  /* First, try to parse as an initialized declaration.  See
	     cp_parser_condition, from whence the bulk of this is
	     copied.  */
	  cp_parser_parse_tentatively (parser);
	  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
					/*is_trailing_return=*/false,
					&type_specifiers);
	  if (cp_parser_parse_definitely (parser))
	    {
	      /* If parsing a type specifier seq succeeded, then this
		 MUST be a initialized declaration.  */
	      tree asm_specification, attributes;
	      cp_declarator *declarator;

	      declarator = cp_parser_declarator (parser,
						 CP_PARSER_DECLARATOR_NAMED,
						 /*ctor_dtor_or_conv_p=*/NULL,
						 /*parenthesized_p=*/NULL,
						 /*member_p=*/false);
	      attributes = cp_parser_attributes_opt (parser);
	      asm_specification = cp_parser_asm_specification_opt (parser);

	      if (declarator == cp_error_declarator)
		cp_parser_skip_to_end_of_statement (parser);

	      else
		{
		  tree pushed_scope, auto_node;

		  decl = start_decl (declarator, &type_specifiers,
				     SD_INITIALIZED, attributes,
				     /*prefix_attributes=*/NULL_TREE,
				     &pushed_scope);

		  auto_node = type_uses_auto (TREE_TYPE (decl));
		  if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
		    {
		      if (cp_lexer_next_token_is (parser->lexer,
						  CPP_OPEN_PAREN))
			error ("parenthesized initialization is not allowed in "
			       "OpenMP %<for%> loop");
		      else
			/* Trigger an error.  */
			cp_parser_require (parser, CPP_EQ, RT_EQ);

		      init = error_mark_node;
		      cp_parser_skip_to_end_of_statement (parser);
		    }
		  else if (CLASS_TYPE_P (TREE_TYPE (decl))
			   || type_dependent_expression_p (decl)
			   || auto_node)
		    {
		      bool is_direct_init, is_non_constant_init;

		      init = cp_parser_initializer (parser,
						    &is_direct_init,
						    &is_non_constant_init);

		      if (auto_node)
			{
			  TREE_TYPE (decl)
			    = do_auto_deduction (TREE_TYPE (decl), init,
						 auto_node);

			  if (!CLASS_TYPE_P (TREE_TYPE (decl))
			      && !type_dependent_expression_p (decl))
			    goto non_class;
			}

		      cp_finish_decl (decl, init, !is_non_constant_init,
				      asm_specification,
				      LOOKUP_ONLYCONVERTING);
		      /* Class-type iterators keep their initialization
			 in FOR_BLOCK so it is emitted outside the
			 OMP_FOR.  */
		      if (CLASS_TYPE_P (TREE_TYPE (decl)))
			{
			  VEC_safe_push (tree, gc, for_block, this_pre_body);
			  init = NULL_TREE;
			}
		      else
			init = pop_stmt_list (this_pre_body);
		      this_pre_body = NULL_TREE;
		    }
		  else
		    {
		      /* Consume '='.  */
		      cp_lexer_consume_token (parser->lexer);
		      init = cp_parser_assignment_expression (parser, false,
							      NULL);

		    non_class:
		      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
			init = error_mark_node;
		      else
			cp_finish_decl (decl, NULL_TREE,
					/*init_const_expr_p=*/false,
					asm_specification,
					LOOKUP_ONLYCONVERTING);
		    }

		  if (pushed_scope)
		    pop_scope (pushed_scope);
		}
	    }
	  else
	    {
	      cp_id_kind idk;
	      /* If parsing a type specifier sequence failed, then
		 this MUST be a simple expression.  */
	      cp_parser_parse_tentatively (parser);
	      decl = cp_parser_primary_expression (parser, false, false,
						   false, &idk);
	      if (!cp_parser_error_occurred (parser)
		  && decl
		  && DECL_P (decl)
		  && CLASS_TYPE_P (TREE_TYPE (decl)))
		{
		  tree rhs;

		  cp_parser_parse_definitely (parser);
		  cp_parser_require (parser, CPP_EQ, RT_EQ);
		  rhs = cp_parser_assignment_expression (parser, false, NULL);
		  finish_expr_stmt (build_x_modify_expr (decl, NOP_EXPR,
							 rhs,
							 tf_warning_or_error));
		  add_private_clause = true;
		}
	      else
		{
		  decl = NULL;
		  cp_parser_abort_tentative_parse (parser);
		  init = cp_parser_expression (parser, false, NULL);
		  if (init)
		    {
		      if (TREE_CODE (init) == MODIFY_EXPR
			  || TREE_CODE (init) == MODOP_EXPR)
			real_decl = TREE_OPERAND (init, 0);
		    }
		}
	    }
	}
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      if (this_pre_body)
	{
	  this_pre_body = pop_stmt_list (this_pre_body);
	  if (pre_body)
	    {
	      tree t = pre_body;
	      pre_body = push_stmt_list ();
	      add_stmt (t);
	      add_stmt (this_pre_body);
	      pre_body = pop_stmt_list (pre_body);
	    }
	  else
	    pre_body = this_pre_body;
	}

      if (decl)
	real_decl = decl;
      /* Inside a combined parallel construct, fix up firstprivate /
	 lastprivate clauses that name the iteration variable.  */
      if (par_clauses != NULL && real_decl != NULL_TREE)
	{
	  tree *c;
	  for (c = par_clauses; *c ; )
	    if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE
		&& OMP_CLAUSE_DECL (*c) == real_decl)
	      {
		error_at (loc, "iteration variable %qD"
			  " should not be firstprivate", real_decl);
		*c = OMP_CLAUSE_CHAIN (*c);
	      }
	    else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE
		     && OMP_CLAUSE_DECL (*c) == real_decl)
	      {
		/* Add lastprivate (decl) clause to OMP_FOR_CLAUSES,
		   change it to shared (decl) in OMP_PARALLEL_CLAUSES.  */
		tree l = build_omp_clause (loc, OMP_CLAUSE_LASTPRIVATE);
		OMP_CLAUSE_DECL (l) = real_decl;
		OMP_CLAUSE_CHAIN (l) = clauses;
		CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c);
		clauses = l;
		OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
		CP_OMP_CLAUSE_INFO (*c) = NULL;
		add_private_clause = false;
	      }
	    else
	      {
		if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE
		    && OMP_CLAUSE_DECL (*c) == real_decl)
		  add_private_clause = false;
		c = &OMP_CLAUSE_CHAIN (*c);
	      }
	}

      /* A class-type iteration variable assigned in the init-expr must
	 be made private unless some clause already covers it.  */
      if (add_private_clause)
	{
	  tree c;
	  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	    {
	      if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
		   || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
		  && OMP_CLAUSE_DECL (c) == decl)
		break;
	      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
		       && OMP_CLAUSE_DECL (c) == decl)
		error_at (loc, "iteration variable %qD "
			  "should not be firstprivate",
			  decl);
	      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		       && OMP_CLAUSE_DECL (c) == decl)
		error_at (loc, "iteration variable %qD should not be reduction",
			  decl);
	    }
	  if (c == NULL)
	    {
	      c = build_omp_clause (loc, OMP_CLAUSE_PRIVATE);
	      OMP_CLAUSE_DECL (c) = decl;
	      c = finish_omp_clauses (c);
	      if (c)
		{
		  OMP_CLAUSE_CHAIN (c) = clauses;
		  clauses = c;
		}
	    }
	}

      cond = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cond = cp_parser_omp_for_cond (parser, decl);
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

      incr = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	{
	  /* If decl is an iterator, preserve the operator on decl
	     until finish_omp_for.  */
	  if (real_decl
	      && ((processing_template_decl
		   && !POINTER_TYPE_P (TREE_TYPE (real_decl)))
		  || CLASS_TYPE_P (TREE_TYPE (real_decl))))
	    incr = cp_parser_omp_for_incr (parser, real_decl);
	  else
	    incr = cp_parser_expression (parser, false, NULL);
	}

      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;

      if (i == collapse - 1)
	break;

      /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is
	 allowed in between the collapsed for loops to be still
	 considered perfectly nested.  Hopefully the final version
	 clarifies this.  For now handle (multiple) {'s and empty
	 statements.  */
      cp_parser_parse_tentatively (parser);
      do
	{
	  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
	    break;
	  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    {
	      cp_lexer_consume_token (parser->lexer);
	      bracecount++;
	    }
	  else if (bracecount
		   && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    {
	      loc = cp_lexer_peek_token (parser->lexer)->location;
	      error_at (loc, "not enough collapsed for loops");
	      collapse_err = true;
	      cp_parser_abort_tentative_parse (parser);
	      declv = NULL_TREE;
	      break;
	    }
	}
      while (1);

      if (declv)
	{
	  cp_parser_parse_definitely (parser);
	  nbraces += bracecount;
	}
    }

  /* Note that we saved the original contents of this flag when we entered
     the structured block, and so we don't need to re-save it here.  */
  parser->in_statement = IN_OMP_FOR;

  /* Note that the grammar doesn't call for a structured block here,
     though the loop as a whole is a structured block.  */
  body = push_stmt_list ();
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  body = pop_stmt_list (body);

  if (declv == NULL_TREE)
    ret = NULL_TREE;
  else
    ret = finish_omp_for (loc_first, declv, initv, condv, incrv, body,
			  pre_body, clauses);

  /* Consume the closing braces that were opened between collapsed
     loops, recovering as best we can if the nesting is broken.  */
  while (nbraces)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	{
	  cp_lexer_consume_token (parser->lexer);
	  nbraces--;
	}
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
      else
	{
	  if (!collapse_err)
	    {
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"collapsed loops not perfectly nested");
	    }
	  collapse_err = true;
	  cp_parser_statement_seq_opt (parser, NULL);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
	    break;
	}
    }

  while (!VEC_empty (tree, for_block))
    add_stmt (pop_stmt_list (VEC_pop (tree, for_block)));
  release_tree_vector (for_block);

  return ret;
}

/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop  */

#define OMP_FOR_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_ORDERED)		\
	| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT)		\
	| (1u << PRAGMA_OMP_CLAUSE_COLLAPSE))

static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, sb, ret;
  unsigned int save;

  clauses = cp_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
				       "#pragma omp for", pragma_tok);

  sb = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);

  ret = cp_parser_omp_for_loop (parser, clauses, NULL);

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));

  return ret;
}

/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block  */

static tree
cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_master (input_location,
			      cp_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block  */

static tree
cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_ordered (loc, cp_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:

   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block  */

static tree
cp_parser_omp_sections_scope (cp_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  cp_token *tok;

  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return NULL_TREE;

  stmt = push_stmt_list ();

  /* The first section-directive may be omitted; statements before it
     form an implicit first section.  */
  if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      unsigned save;

      substmt = begin_omp_structured_block ();
      save = cp_parser_begin_omp_structured_block (parser);

      while (1)
	{
	  cp_parser_statement (parser, NULL_TREE, false, NULL);

	  tok = cp_lexer_peek_token (parser->lexer);
	  if (tok->pragma_kind == PRAGMA_OMP_SECTION)
	    break;
	  if (tok->type == CPP_CLOSE_BRACE)
	    break;
	  if (tok->type == CPP_EOF)
	    break;
	}

      cp_parser_end_omp_structured_block (parser, save);
      substmt = finish_omp_structured_block (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }

  while (1)
    {
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type == CPP_CLOSE_BRACE)
	break;
      if (tok->type == CPP_EOF)
	break;

      if (tok->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_require_pragma_eol (parser, tok);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  /* Diagnose a missing section directive only once per run of
	     stray statements.  */
	  cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}

      substmt = cp_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  add_stmt (stmt);
  return stmt;
}

/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope  */

#define OMP_SECTIONS_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, ret;

  clauses = cp_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
				       "#pragma omp sections", pragma_tok);

  ret = cp_parser_omp_sections_scope (parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;

  return ret;
}

/* OpenMP 2.5:
   # pragma parallel parallel-clause new-line
   # pragma parallel for parallel-for-clause new-line
   # pragma parallel sections parallel-sections-clause new-line  */

#define OMP_PARALLEL_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_COPYIN)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))

static tree
cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  /* Detect the combined "parallel for" / "parallel sections" forms and
     widen the permitted clause mask accordingly (nowait is not allowed
     on the combined constructs).  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    {
      cp_lexer_consume_token (parser->lexer);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp (p, "sections") == 0)
	{
	  cp_lexer_consume_token (parser->lexer);
	  p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
	  p_name = "#pragma omp parallel sections";
	  mask |= OMP_SECTIONS_CLAUSE_MASK;
	  mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
	}
    }

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok);
  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);

  switch (p_kind)
    {
    case PRAGMA_OMP_PARALLEL:
      cp_parser_statement (parser, NULL_TREE, false, NULL);
      par_clause = clauses;
      break;

    case PRAGMA_OMP_PARALLEL_FOR:
      c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
      cp_parser_omp_for_loop (parser, ws_clause, &par_clause);
      break;

    case PRAGMA_OMP_PARALLEL_SECTIONS:
      c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause);
      stmt = cp_parser_omp_sections_scope (parser);
      if (stmt)
	OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
      break;

    default:
      gcc_unreachable ();
    }

  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_omp_parallel (par_clause, block);
  if (p_kind != PRAGMA_OMP_PARALLEL)
    OMP_PARALLEL_COMBINED (stmt) = 1;
  return stmt;
}

/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block  */

#define OMP_SINGLE_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt = make_node (OMP_SINGLE);
  TREE_TYPE (stmt) = void_type_node;

  OMP_SINGLE_CLAUSES (stmt)
    = cp_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				 "#pragma omp single", pragma_tok);
  OMP_SINGLE_BODY (stmt) = cp_parser_omp_structured_block (parser);

  return add_stmt (stmt);
}

/* OpenMP 3.0:
   # pragma omp task task-clause[optseq] new-line
     structured-block  */

#define OMP_TASK_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_UNTIED)		\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_FINAL)		\
	| (1u << PRAGMA_OMP_CLAUSE_MERGEABLE))

static tree
cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, block;
  unsigned int save;

  clauses = cp_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
				       "#pragma omp task", pragma_tok);
  block = begin_omp_task ();
  save = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, save);
  return finish_omp_task (clauses, block);
}

/* OpenMP 3.0:
   # pragma omp taskwait new-line  */

static void
cp_parser_omp_taskwait (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_taskwait ();
}

/* OpenMP 3.1:
   # pragma omp taskyield new-line  */

static void
cp_parser_omp_taskyield (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_taskyield ();
}

/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list) */

static void
cp_parser_omp_threadprivate (cp_parser *parser, cp_token *pragma_tok)
{
  tree vars;

  vars = cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);

  finish_omp_threadprivate (vars);
}

/* Main entry point to OpenMP statement pragmas.
*/

/* Dispatch on PRAGMA_TOK's pragma kind to the individual OpenMP
   directive parsers, and attach the pragma's location to the
   resulting statement.  */

static void
cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt;

  switch (pragma_tok->pragma_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      cp_parser_omp_atomic (parser, pragma_tok);
      return;
    case PRAGMA_OMP_CRITICAL:
      stmt = cp_parser_omp_critical (parser, pragma_tok);
      break;
    case PRAGMA_OMP_FOR:
      stmt = cp_parser_omp_for (parser, pragma_tok);
      break;
    case PRAGMA_OMP_MASTER:
      stmt = cp_parser_omp_master (parser, pragma_tok);
      break;
    case PRAGMA_OMP_ORDERED:
      stmt = cp_parser_omp_ordered (parser, pragma_tok);
      break;
    case PRAGMA_OMP_PARALLEL:
      stmt = cp_parser_omp_parallel (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SECTIONS:
      stmt = cp_parser_omp_sections (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SINGLE:
      stmt = cp_parser_omp_single (parser, pragma_tok);
      break;
    case PRAGMA_OMP_TASK:
      stmt = cp_parser_omp_task (parser, pragma_tok);
      break;
    default:
      gcc_unreachable ();
    }

  if (stmt)
    SET_EXPR_LOCATION (stmt, pragma_tok->location);
}

/* Transactional Memory parsing routines.  */

/* Parse a transaction attribute.

   txn-attribute:
	attribute
	[ [ identifier ] ]

   ??? Simplify this when C++0x bracket attributes are
   implemented properly.  */

static tree
cp_parser_txn_attribute_opt (cp_parser *parser)
{
  cp_token *token;
  tree attr_name, attr = NULL;

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
    return cp_parser_attributes_opt (parser);

  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
    return NULL_TREE;
  cp_lexer_consume_token (parser->lexer);
  if (!cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE))
    goto error1;

  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME || token->type == CPP_KEYWORD)
    {
      token = cp_lexer_consume_token (parser->lexer);

      attr_name = (token->type == CPP_KEYWORD
		   /* For keywords, use the canonical spelling,
		      not the parsed identifier.  */
		   ? ridpointers[(int) token->keyword]
		   : token->u.value);
      attr = build_tree_list (attr_name, NULL_TREE);
    }
  else
    cp_parser_error (parser, "expected identifier");

  /* Both closing square brackets are required; the second one is also
     consumed on the error path below.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
 error1:
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
  return attr;
}

/* Parse a __transaction_atomic or __transaction_relaxed statement.

   transaction-statement:
     __transaction_atomic txn-attribute[opt] txn-noexcept-spec[opt]
       compound-statement
     __transaction_relaxed txn-noexcept-spec[opt] compound-statement
*/

static tree
cp_parser_transaction (cp_parser *parser, enum rid keyword)
{
  unsigned char old_in = parser->in_transaction;
  unsigned char this_in = 1, new_in;
  cp_token *token;
  tree stmt, attrs, noex;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);
  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
	  : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      attrs = cp_parser_txn_attribute_opt (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }

  /* Parse a noexcept specification.  */
  noex = cp_parser_noexcept_specification_opt (parser, true, NULL, true);

  /* Keep track if we're in the lexical scope of an outer transaction.  */
  new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);

  stmt = begin_transaction_stmt (token->location, NULL, this_in);

  parser->in_transaction = new_in;

  cp_parser_compound_statement (parser, NULL, false, false);

  parser->in_transaction = old_in;

  finish_transaction_stmt (stmt, NULL, this_in, noex);

  return stmt;
}

/* Parse a __transaction_atomic or __transaction_relaxed expression.
   transaction-expression:
     __transaction_atomic txn-noexcept-spec[opt] ( expression )
     __transaction_relaxed txn-noexcept-spec[opt] ( expression )
*/

static tree
cp_parser_transaction_expression (cp_parser *parser, enum rid keyword)
{
  unsigned char old_in = parser->in_transaction;
  unsigned char this_in = 1;
  cp_token *token;
  tree expr, noex;
  bool noex_expr;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);

  if (!flag_tm)
    error (keyword == RID_TRANSACTION_RELAXED
	   ? G_("%<__transaction_relaxed%> without transactional memory "
		"support enabled")
	   : G_("%<__transaction_atomic%> without transactional memory "
		"support enabled"));

  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
	  : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;

  /* Set this early.  This might mean that we allow transaction_cancel in
     an expression that we find out later actually has to be a constexpr.
     However, we expect that cxx_constant_value will be able to deal with
     this; also, if the noexcept has no constexpr, then what we parse next
     really is a transaction's body.  */
  parser->in_transaction = this_in;

  /* Parse a noexcept specification.  */
  noex = cp_parser_noexcept_specification_opt (parser, false, &noex_expr,
					       true);

  if (!noex || !noex_expr
      || cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
    {
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

      expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
      finish_parenthesized_expr (expr);

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
    }
  else
    {
      /* The only expression that is available got parsed for the noexcept
         already.  noexcept is true then.  */
      expr = noex;
      noex = boolean_true_node;
    }

  expr = build_transaction_expr (token->location, expr, this_in, noex);
  parser->in_transaction = old_in;

  /* A transaction expression is never a valid integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_TRANSACTION))
    return error_mark_node;

  return (flag_tm ? expr : error_mark_node);
}

/* Parse a function-transaction-block.

   function-transaction-block:
     __transaction_atomic txn-attribute[opt] ctor-initializer[opt]
	 function-body
     __transaction_atomic txn-attribute[opt] function-try-block
     __transaction_relaxed ctor-initializer[opt] function-body
     __transaction_relaxed function-try-block
*/

static bool
cp_parser_function_transaction (cp_parser *parser, enum rid keyword)
{
  unsigned char old_in = parser->in_transaction;
  unsigned char new_in = 1;
  tree compound_stmt, stmt, attrs;
  bool ctor_initializer_p;
  cp_token *token;

  gcc_assert (keyword == RID_TRANSACTION_ATOMIC
      || keyword == RID_TRANSACTION_RELAXED);
  token = cp_parser_require_keyword (parser, keyword,
      (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
	  : RT_TRANSACTION_RELAXED));
  gcc_assert (token != NULL);

  if (keyword == RID_TRANSACTION_RELAXED)
    new_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      attrs = cp_parser_txn_attribute_opt (parser);
      if (attrs)
	new_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }

  stmt = begin_transaction_stmt (token->location, &compound_stmt, new_in);

  parser->in_transaction = new_in;

  /* A constructor body may be a function-try-block; otherwise parse the
     optional ctor-initializer followed by the function body.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  parser->in_transaction = old_in;

  finish_transaction_stmt (stmt, compound_stmt, new_in, NULL_TREE);

  return ctor_initializer_p;
}

/* Parse a __transaction_cancel statement.

   cancel-statement:
     __transaction_cancel txn-attribute[opt] ;
     __transaction_cancel txn-attribute[opt] throw-expression ;

   ??? Cancel and throw is not yet implemented.
 */

/* Parse __transaction_cancel and validate it against the surrounding
   transaction context tracked in parser->in_transaction; on success,
   emit a TM abort call.  Returns the statement or error_mark_node.  */

static tree
cp_parser_transaction_cancel (cp_parser *parser)
{
  cp_token *token;
  bool is_outer = false;
  tree stmt, attrs;

  token = cp_parser_require_keyword (parser, RID_TRANSACTION_CANCEL,
				     RT_TRANSACTION_CANCEL);
  gcc_assert (token != NULL);

  attrs = cp_parser_txn_attribute_opt (parser);
  if (attrs)
    is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);

  /* ??? Parse cancel-and-throw here.  */

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (!flag_tm)
    {
      error_at (token->location, "%<__transaction_cancel%> without "
		"transactional memory support enabled");
      return error_mark_node;
    }
  else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
    {
      /* Cancellation is only meaningful inside an atomic transaction.  */
      error_at (token->location, "%<__transaction_cancel%> within a "
		"%<__transaction_relaxed%>");
      return error_mark_node;
    }
  else if (is_outer)
    {
      /* [[outer]] cancel must be lexically inside an outer transaction,
	 or in a function declared transaction_may_cancel_outer.  */
      if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
	  && !is_tm_may_cancel_outer (current_function_decl))
	{
	  error_at (token->location, "outer %<__transaction_cancel%> not "
		    "within outer %<__transaction_atomic%>");
	  error_at (token->location,
		    "  or a %<transaction_may_cancel_outer%> function");
	  return error_mark_node;
	}
    }
  else if (parser->in_transaction == 0)
    {
      error_at (token->location, "%<__transaction_cancel%> not within "
		"%<__transaction_atomic%>");
      return error_mark_node;
    }

  stmt = build_tm_abort_call (token->location, is_outer);
  add_stmt (stmt);
  finish_stmt ();

  return stmt;
}

/* The parser.  */

/* GTY: the singleton parser is a GC root so its lexer/token buffers
   survive collections.  */
static GTY (()) cp_parser *the_parser;

/* Special handling for the first token or line in the file.  The first
   thing in the file might be #pragma GCC pch_preprocess, which loads a
   PCH file, which is a GC collection point.  So we need to handle this
   first pragma without benefit of an existing lexer structure.

   Always returns one token to the caller in *FIRST_TOKEN.  This is
   either the true first token of the file, or the first token after
   the initial pragma.  */

static void
cp_parser_initial_pragma (cp_token *first_token)
{
  tree name = NULL;

  cp_lexer_get_preprocessor_token (NULL, first_token);
  if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS)
    return;

  cp_lexer_get_preprocessor_token (NULL, first_token);
  if (first_token->type == CPP_STRING)
    {
      name = first_token->u.value;

      cp_lexer_get_preprocessor_token (NULL, first_token);
      if (first_token->type != CPP_PRAGMA_EOL)
	error_at (first_token->location,
		  "junk at end of %<#pragma GCC pch_preprocess%>");
    }
  else
    error_at (first_token->location, "expected string literal");

  /* Skip to the end of the pragma.  */
  while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF)
    cp_lexer_get_preprocessor_token (NULL, first_token);

  /* Now actually load the PCH file.  */
  if (name)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));

  /* Read one more token to return to our caller.  We have to do this
     after reading the PCH file in, since its pointers have to be
     live.  */
  cp_lexer_get_preprocessor_token (NULL, first_token);
}

/* Normal parsing of a pragma token.  Here we can (and must) use the
   regular lexer.
 */

/* Parse the pragma at the head of the lexer, in parsing context CONTEXT
   (statement, compound statement, or external/declaration scope).
   Returns true if the pragma produced a statement that the caller must
   handle, false if the pragma was consumed here.  */

static bool
cp_parser_pragma (cp_parser *parser, enum pragma_context context)
{
  cp_token *pragma_tok;
  unsigned int id;

  pragma_tok = cp_lexer_consume_token (parser->lexer);
  gcc_assert (pragma_tok->type == CPP_PRAGMA);
  /* Tell the lexer we are inside a pragma so it produces the
     CPP_PRAGMA_EOL token at the end of the line.  */
  parser->lexer->in_pragma = true;

  id = pragma_tok->pragma_kind;
  switch (id)
    {
    case PRAGMA_GCC_PCH_PREPROCESS:
      /* Valid only as the very first token of the file; see
	 cp_parser_initial_pragma.  */
      error_at (pragma_tok->location,
		"%<#pragma GCC pch_preprocess%> must be first");
      break;

    case PRAGMA_OMP_BARRIER:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_barrier (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location, "%<#pragma omp barrier%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_FLUSH:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_flush (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location, "%<#pragma omp flush%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_TASKWAIT:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_taskwait (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp taskwait%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_TASKYIELD:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_taskyield (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp taskyield%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_THREADPRIVATE:
      cp_parser_omp_threadprivate (parser, pragma_tok);
      return false;

    case PRAGMA_OMP_ATOMIC:
    case PRAGMA_OMP_CRITICAL:
    case PRAGMA_OMP_FOR:
    case PRAGMA_OMP_MASTER:
    case PRAGMA_OMP_ORDERED:
    case PRAGMA_OMP_PARALLEL:
    case PRAGMA_OMP_SECTIONS:
    case PRAGMA_OMP_SINGLE:
    case PRAGMA_OMP_TASK:
      /* Statement-generating constructs; not valid at file scope.  */
      if (context == pragma_external)
	goto bad_stmt;
      cp_parser_omp_construct (parser, pragma_tok);
      return true;

    case PRAGMA_OMP_SECTION:
      /* "#pragma omp section" is handled inside the sections parser;
	 seeing it here means it is misplaced.  */
      error_at (pragma_tok->location,
		"%<#pragma omp section%> may only be used in "
		"%<#pragma omp sections%> construct");
      break;

    default:
      gcc_assert (id >= PRAGMA_FIRST_EXTERNAL);
      c_invoke_pragma_handler (id);
      break;

    bad_stmt:
      cp_parser_error (parser, "expected declaration specifiers");
      break;
    }

  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return false;
}

/* The interface the pragma parsers have to the lexer.  */

enum cpp_ttype
pragma_lex (tree *value)
{
  cp_token *tok;
  enum cpp_ttype ret;

  tok = cp_lexer_peek_token (the_parser->lexer);

  ret = tok->type;
  *value = tok->u.value;

  if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF)
    /* Map end-of-pragma to EOF for the pragma handlers; the token is
       deliberately NOT consumed.  */
    ret = CPP_EOF;
  else if (ret == CPP_STRING)
    /* cp_parser_string_literal consumes the token(s) itself and handles
       concatenation.  */
    *value = cp_parser_string_literal (the_parser, false, false);
  else
    {
      cp_lexer_consume_token (the_parser->lexer);
      if (ret == CPP_KEYWORD)
	ret = CPP_NAME;
    }

  return ret;
}

/* External interface.  */

/* Parse one entire translation unit.  */

void
c_parse_file (void)
{
  static bool already_called = false;

  /* The C++ front end supports only a single translation unit per
     invocation.  */
  if (already_called)
    {
      sorry ("inter-module optimizations not implemented for C++");
      return;
    }
  already_called = true;

  the_parser = cp_parser_new ();
  push_deferring_access_checks (flag_access_control
				? dk_no_deferred : dk_no_check);
  cp_parser_translation_unit (the_parser);
  the_parser = NULL;
}

#include "gt-cp-parser.h"
/* ==== file: 4-4t.c ==== */
#include <stdio.h>
#include <omp.h>

/* Demonstration program: run a 16-iteration loop across a team of four
   OpenMP threads, with each thread announcing the iterations it was
   assigned, then print a farewell message from the initial thread.  */
int main()
{
    int i;

    /* Request a team of four threads for subsequent parallel regions.  */
    omp_set_num_threads(4);

    /* The loop index is implicitly private to each thread inside the
       "parallel for"; iterations are divided among the team.  */
#pragma omp parallel for
    for (i = 0; i < 16; i++)
    {
        printf("Hello from thread number: %d Iteration: %d \n",
               omp_get_thread_num(), i);
    }

    /* Back to a single thread after the implicit barrier.  */
    printf("\n GoodBye – Team Destroyed – Exiting Program \n\n");
}
/* ==== file: mg.c ==== */
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB MG code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

//---------------------------------------------------------------------
//  program mg
//---------------------------------------------------------------------

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include "print_results.h"
#include "../my_include/my_include.h"

// Forward declarations.  NOTE: the grid arguments are passed as void*
// and re-cast inside each routine to C99 variable-length-array pointers
// so the same flat storage can be viewed with per-level dimensions.
// ("or" is a valid identifier in C; this file would not compile as C++.)
static void setup(int *n1, int *n2, int *n3);
static void mg3P(double u[], double v[], double r[], double a[4],
                 double c[4], int n1, int n2, int n3);
static void psinv(void *or, void *ou, int n1, int n2, int n3,
                  double c[4], int k);
static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3,
                  double a[4], int k);
static void rprj3(void *or, int m1k, int m2k, int m3k,
                  void *os, int m1j, int m2j, int m3j, int k);
static void interp(void *oz, int mm1, int mm2, int mm3,
                   void *ou, int n1, int n2, int n3, int k);
static void norm2u3(void *or, int n1, int n2, int n3,
                    double *rnm2, double *rnmu,
                    int nx, int ny, int nz);
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk);
static void comm3(void *ou, int n1, int n2, int n3, int kk);
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k);
static void showall(void *oz, int n1, int n2, int n3);
static double power(double a, int n);
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
                   int m, int ind);
static void zero3(void *oz, int n1, int n2, int n3);

//-------------------------------------------------------------------------c
// These arrays are in common because they are quite large
// and probably shouldn't be allocated on the stack. They
// are always passed as subroutine args.
//-------------------------------------------------------------------------c
/* commcon /noautom/ */
static double u[NR];   // approximate solution, all levels, flat storage
static double v[NR];   // right-hand side
static double r[NR];   // residual, all levels

/* common /grid/ */
static int is1, is2, is3, ie1, ie2, ie3;

/* common /rans_save/ starts */
double starts[NM];


int main()
{
  //-------------------------------------------------------------------------c
  // k is the current level. It is passed down through subroutine args
  // and is NOT global. it is the current iteration
  //-------------------------------------------------------------------------c
  int k, it;

  // Crash/recovery instrumentation (declared in my_include.h):
  // register u/r as crucial data and `it` as consistent data —
  // presumably for checkpointing; semantics live outside this file.
  crucial_data(&u[0], "double", NR);
  crucial_data(&r[0], "double", NR);
  consistent_data(&it, "int", 1);

  double t, tinit, mflops;

  double a[4], c[4];

  double rnm2, rnmu, old2, oldu, epsilon;
  int n1, n2, n3, nit;
  double nn, verify_value, err;
  logical verified;

  int i;
  char *t_names[T_last];
  double tmax;

  for (i = T_init; i < T_last; i++) {
    timer_clear(i);
  }

  timer_start(T_init);

  //---------------------------------------------------------------------
  // Read in and broadcast input data
  //---------------------------------------------------------------------
  FILE *fp;
  if ((fp = fopen("timer.flag", "r")) != NULL) {
    timeron = true;
    t_names[T_init] = "init";
    t_names[T_bench] = "benchmk";
    t_names[T_mg3P] = "mg3P";
    t_names[T_psinv] = "psinv";
    t_names[T_resid] = "resid";
    t_names[T_rprj3] = "rprj3";
    t_names[T_interp] = "interp";
    t_names[T_norm2] = "norm2";
    t_names[T_comm3] = "comm3";
    fclose(fp);
  } else {
    timeron = false;
  }

  printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - MG Benchmark\n\n");

  if ((fp = fopen("mg.input.sample", "r")) != NULL) {
    int result;
    printf(" Reading from input file mg.input\n");
    result = fscanf(fp, "%d\n", &lt);
    // NOTE(review): this prints fscanf's return value (items matched),
    // not lt — likely a debugging leftover.
    printf("lt is %d\n", result);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]);
    // NOTE(review): "\[" and "\]" are not valid C escape sequences
    // (compilers warn and print the bracket as-is).
    printf("nz\[lt\] is %d %d %d\n", nx[lt], ny[lt], nz[lt]);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d", &nit);
    // NOTE(review): "$d" looks like a typo for "%d", and `result` is
    // fscanf's return value, not nit.
    printf("nit is $d\n", result);
    while (fgetc(fp) != '\n');
    for (i = 0; i <= 7; i++) {
      result = fscanf(fp, "%d", &debug_vec[i]);
      printf("%d\n", debug_vec[i]);
    }
    fclose(fp);
  } else {
    printf(" No input file. Using compiled defaults \n");
    lt = LT_DEFAULT;
    nit = NIT_DEFAULT;
    nx[lt] = NX_DEFAULT;
    ny[lt] = NY_DEFAULT;
    nz[lt] = NZ_DEFAULT;
    for (i = 0; i <= 7; i++) {
      debug_vec[i] = DEBUG_DEFAULT;
    }
  }

  // Map (problem size, iteration count) to the NPB class letter;
  // anything unrecognized is class 'U' (unknown, no verification).
  if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
    Class = 'U';
  } else if ( nx[lt] == 32 && nit == 4 ) {
    Class = 'S';
  } else if ( nx[lt] == 128 && nit == 4 ) {
    Class = 'W';
  } else if ( nx[lt] == 256 && nit == 4 ) {
    Class = 'A';
  } else if ( nx[lt] == 256 && nit == 20 ) {
    Class = 'B';
  } else if ( nx[lt] == 512 && nit == 20 ) {
    Class = 'C';
  } else if ( nx[lt] == 1024 && nit == 50 ) {
    Class = 'D';
  } else if ( nx[lt] == 2048 && nit == 50 ) {
    Class = 'E';
  } else {
    Class = 'U';
  }

  printf("%d\n", nx[lt]);

  //---------------------------------------------------------------------
  // Use these for debug info:
  //---------------------------------------------------------------------
  //     debug_vec(0) = 1 !=> report all norms
  //     debug_vec(1) = 1 !=> some setup information
  //     debug_vec(1) = 2 !=> more setup information
  //     debug_vec(2) = k => at level k or below, show result of resid
  //     debug_vec(3) = k => at level k or below, show result of psinv
  //     debug_vec(4) = k => at level k or below, show result of rprj
  //     debug_vec(5) = k => at level k or below, show result of interp
  //     debug_vec(6) = 1 => (unused)
  //     debug_vec(7) = 1 => (unused)
  //---------------------------------------------------------------------
  a[0] = -8.0/3.0;
  a[1] = 0.0;
  a[2] = 1.0/6.0;
  a[3] = 1.0/12.0;

  if (Class == 'A' || Class == 'S' || Class =='W') {
    //---------------------------------------------------------------------
    // Coefficients for the S(a) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/8.0;
    c[1] = +1.0/32.0;
    c[2] = -1.0/64.0;
    c[3] = 0.0;
  } else {
    //---------------------------------------------------------------------
    // Coefficients for the S(b) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/17.0;
    c[1] = +1.0/33.0;
    c[2] = -1.0/61.0;
    c[3] = 0.0;
  }

  lb = 1;
  k = lt;

  printf("%d\n", lb);
  setup(&n1, &n2, &n3);
  printf("%d\n", lb);
  zero3(u, n1, n2, n3);
  printf("%d\n", lb);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);
  printf("%dend!\n", lb);

  norm2u3(v, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

  // printf("\n");
  printf(" norms of random v are\n");
  // printf("%4d%19.2f%19.2e\n", 0, rnm2, rnmu);
  // printf(" about to evaluate resid, k=%d\n", k);

  printf(" Size: %4dx%4dx%4d (class %c)\n", nx[lt], ny[lt], nz[lt], Class);
  printf(" Iterations: %5d\n", nit);
  printf(" Number of available threads: %5d\n", omp_get_max_threads());
  printf("\n");

  resid(u, v, r, n1, n2, n3, a, k);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  //---------------------------------------------------------------------
  // One iteration for startup
  //---------------------------------------------------------------------
  mg3P(u, v, r, a, c, n1, n2, n3);
  resid(u, v, r, n1, n2, n3, a, k);

  // Re-initialize so the timed run starts from a clean state.
  setup(&n1, &n2, &n3);

  zero3(u, n1, n2, n3);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);

  timer_stop(T_init);
  tinit = timer_read(T_init);
  printf(" Initialization time: %15.3f seconds\n\n", tinit);

  for (i = T_bench; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_bench);

  if (timeron) timer_start(T_resid2);
  resid(u, v, r, n1, n2, n3, a, k);
  if (timeron) timer_stop(T_resid2);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  // Crash-injection harness (my_include.h): flush caches and mark the
  // region in which a simulated crash may occur.
  flush_whole_cache();
  start_crash();
  int first = 0;
  for (it = 1; it <= nit; it++) {
    // NOTE(review): on reaching it==3 the first time, the loop is reset
    // to it=1 — this deliberately replays iterations once (presumably to
    // exercise the crash/recovery path); `it` is registered as
    // consistent_data above.
    if(it == 3 && first == 0) { it = 1; first = 1;}
    printf("it = %d\n",it);
    if ((it == 1) || (it == nit) || ((it % 5) == 0)) {
      printf(" iter %3d\n", it);
    }
    if (timeron) timer_start(T_mg3P);
    mg3P(u, v, r, a, c, n1, n2, n3);
    if (timeron) timer_stop(T_mg3P);
    if (timeron) timer_start(T_resid2);
    resid(u, v, r, n1, n2, n3, a, k);
    if (timeron) timer_stop(T_resid2);
    clwb(&it);  // write back the iteration counter (cache-line write-back)
  }
  end_crash();

  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

  timer_stop(T_bench);
  t = timer_read(T_bench);

  verified = false;
  verify_value = 0.0;

  printf("\n Benchmark completed\n");

  // Verification: compare the final residual L2 norm against the
  // published reference value for the class, within epsilon.
  epsilon = 1.0e-8;
  if (Class != 'U') {
    if (Class == 'S') {
      verify_value = 0.5307707005734e-04;
    } else if (Class == 'W') {
      verify_value = 0.6467329375339e-05;
    } else if (Class == 'A') {
      verify_value = 0.2433365309069e-05;
    } else if (Class == 'B') {
      verify_value = 0.1800564401355e-05;
    } else if (Class == 'C') {
      verify_value = 0.5706732285740e-06;
    } else if (Class == 'D') {
      verify_value = 0.1583275060440e-09;
    } else if (Class == 'E') {
      verify_value = 0.5630442584711e-10;
    }

    err = fabs( rnm2 - verify_value ) / verify_value;
    if (err <= epsilon) {
      verified = true;
      printf(" VERIFICATION SUCCESSFUL\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" Error is %20.13E\n", err);
    } else {
      verified = false;
      printf(" VERIFICATION FAILED\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" The correct L2 Norm is %20.13E\n", verify_value);
    }
  } else {
    verified = false;
    printf(" Problem size unknown\n");
    printf(" NO VERIFICATION PERFORMED\n");
    printf(" L2 Norm is %20.13E\n", rnm2);
  }

  nn = 1.0 * nx[lt] * ny[lt] * nz[lt];

  // 58 flops per grid point per iteration (NPB MG operation count).
  if (t != 0.0) {
    mflops = 58.0 * nit * nn * 1.0e-6 / t;
  } else {
    mflops = 0.0;
  }

  print_results("MG", Class, nx[lt], ny[lt], nz[lt], nit, t,
                mflops, " floating point", verified, NPBVERSION,
                COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  //---------------------------------------------------------------------
  // More timers
  //---------------------------------------------------------------------
  if (timeron) {
    tmax = timer_read(T_bench);
    if (tmax == 0.0) tmax = 1.0;

    printf(" SECTION Time (secs)\n");
    for (i = T_bench; i < T_last; i++) {
      t = timer_read(i);
      if (i == T_resid2) {
        // T_resid2 measures resid calls made outside mg3P; subtract to
        // report the portion attributable to mg3P.
        t = timer_read(T_resid) - t;
        printf(" --> %8s:%9.3f (%6.2f%%)\n", "mg-resid", t, t*100./tmax);
      } else {
        printf(" %-8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100./tmax);
      }
    }
  }

  return 0;
}


// Compute per-level grid sizes ng/m1/m2/m3 and per-level offsets ir[]
// into the flat u/r arrays; returns the finest-level padded dimensions
// through n1/n2/n3.
static void setup(int *n1, int *n2, int *n3)
{
  int k, j;

  int ax, mi[MAXLEVEL+1][3];
  int ng[MAXLEVEL+1][3];

  ng[lt][0] = nx[lt];
  ng[lt][1] = ny[lt];
  ng[lt][2] = nz[lt];
  // Each coarser level halves every dimension.
  for (k = lt-1; k >= 1; k--) {
    for (ax = 0; ax < 3; ax++) {
      ng[k][ax] = ng[k+1][ax]/2;
    }
  }
  for (k = lt; k >= 1; k--) {
    nx[k] = ng[k][0];
    ny[k] = ng[k][1];
    nz[k] = ng[k][2];
  }

  // Padded (+2 ghost planes) per-level array dimensions.
  for (k = lt; k >= 1; k--) {
    for (ax = 0; ax < 3; ax++) {
      mi[k][ax] = 2 + ng[k][ax];
    }

    m1[k] = mi[k][0];
    m2[k] = mi[k][1];
    m3[k] = mi[k][2];
  }

  k = lt;
  is1 = 2 + ng[k][0] - ng[lt][0];
  ie1 = 1 + ng[k][0];
  *n1 = 3 + ie1 - is1;
  is2 = 2 + ng[k][1] - ng[lt][1];
  ie2 = 1 + ng[k][1];
  *n2 = 3 + ie2 - is2;
  is3 = 2 + ng[k][2] - ng[lt][2];
  ie3 = 1 + ng[k][2];
  *n3 = 3 + ie3 - is3;

  // Offsets of each level inside the flat arrays; finest level first.
  ir[lt] = 0;
  for (j = lt-1; j >= 1; j--) {
    ir[j] = ir[j+1]+ONE*m1[j+1]*m2[j+1]*m3[j+1];
  }

  if (debug_vec[1] >= 1) {
    printf(" in setup, \n");
    printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
    printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
           k,lt,ng[k][0],ng[k][1],ng[k][2],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3);
  }
}


//---------------------------------------------------------------------
// multigrid V-cycle routine
//---------------------------------------------------------------------
static void mg3P(double u[], double v[], double r[], double a[4],
                 double c[4], int n1, int n2, int n3)
{
  int j, k;

  //---------------------------------------------------------------------
  // down cycle.
  // restrict the residual from the fine grid to the coarse
  //---------------------------------------------------------------------
  for (k = lt; k >= lb+1; k--) {
    j = k - 1;
    rprj3(&r[ir[k]], m1[k], m2[k], m3[k],
          &r[ir[j]], m1[j], m2[j], m3[j], k);
  }

  k = lb;
  //---------------------------------------------------------------------
  // compute an approximate solution on the coarsest grid
  //---------------------------------------------------------------------
  zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
  psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);

  for (k = lb+1; k <= lt-1; k++) {
    j = k - 1;
    //---------------------------------------------------------------------
    // prolongate from level k-1  to k
    //---------------------------------------------------------------------
    zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
    interp(&u[ir[j]], m1[j], m2[j], m3[j],
           &u[ir[k]], m1[k], m2[k], m3[k], k);
    //---------------------------------------------------------------------
    // compute residual for level k
    //---------------------------------------------------------------------
    resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k);
    //---------------------------------------------------------------------
    // apply smoother
    //---------------------------------------------------------------------
    psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
  }

  j = lt - 1;
  k = lt;
  interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k);
  resid(u, v, r, n1, n2, n3, a, k);
  psinv(r, u, n1, n2, n3, c, k);
}


//---------------------------------------------------------------------
// psinv applies an approximate inverse as smoother:  u = u + Cr
//
// This  implementation costs  15A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Presuming coefficient c(3) is zero (the NPB assumes this,
// but it is thus not a general case), 2A + 1M may be eliminated,
// resulting in 13A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//--------------------------------------------------------------------- static void psinv(void *or, void *ou, int n1, int n2, int n3, double c[4], int k) { double (*r)[n2][n1] = (double (*)[n2][n1])or; double (*u)[n2][n1] = (double (*)[n2][n1])ou; int i3, i2, i1; double r1[M], r2[M]; if (timeron) timer_start(T_psinv); #pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2) for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 0; i1 < n1; i1++) { r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1] + r[i3-1][i2][i1] + r[i3+1][i2][i1]; r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1] + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1]; } for (i1 = 1; i1 < n1-1; i1++) { u[i3][i2][i1] = u[i3][i2][i1] + c[0] * r[i3][i2][i1] + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1] + r1[i1] ) + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] ); //-------------------------------------------------------------------- // Assume c[3] = 0 (Enable line below if c[3] not= 0) //-------------------------------------------------------------------- // + c[3] * ( r2[i1-1] + r2[i1+1] ) //-------------------------------------------------------------------- } } } if (timeron) timer_stop(T_psinv); //--------------------------------------------------------------------- // exchange boundary points //--------------------------------------------------------------------- comm3(u, n1, n2, n3, k); if (debug_vec[0] >= 1) { rep_nrm(u, n1, n2, n3, " psinv", k); } if (debug_vec[3] >= k) { showall(u, n1, n2, n3); } } //--------------------------------------------------------------------- // resid computes the residual: r = v - Au // // This implementation costs 15A + 4M per result, where // A and M denote the costs of Addition (or Subtraction) and // Multiplication, respectively. // Presuming coefficient a(1) is zero (the NPB assumes this, // but it is thus not a general case), 3A + 1M may be eliminated, // resulting in 12A + 3M. // Note that this vectorizes, and is also fine for cache // based machines. 
//--------------------------------------------------------------------- static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3, double a[4], int k) { double (*u)[n2][n1] = (double (*)[n2][n1])ou; double (*v)[n2][n1] = (double (*)[n2][n1])ov; double (*r)[n2][n1] = (double (*)[n2][n1])or; int i3, i2, i1; double u1[M], u2[M]; if (timeron) timer_start(T_resid); #pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2) for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 0; i1 < n1; i1++) { u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1] + u[i3-1][i2][i1] + u[i3+1][i2][i1]; u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1] + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1]; } for (i1 = 1; i1 < n1-1; i1++) { r[i3][i2][i1] = v[i3][i2][i1] - a[0] * u[i3][i2][i1] //------------------------------------------------------------------- // Assume a[1] = 0 (Enable 2 lines below if a[1] not= 0) //------------------------------------------------------------------- // - a[1] * ( u[i3][i2][i1-1] + u[i3][i2][i1+1] // + u1[i1] ) //------------------------------------------------------------------- - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] ) - a[3] * ( u2[i1-1] + u2[i1+1] ); } } } if (timeron) timer_stop(T_resid); //--------------------------------------------------------------------- // exchange boundary data //--------------------------------------------------------------------- comm3(r, n1, n2, n3, k); if (debug_vec[0] >= 1) { rep_nrm(r, n1, n2, n3, " resid", k); } if (debug_vec[2] >= k) { showall(r, n1, n2, n3); } } //--------------------------------------------------------------------- // rprj3 projects onto the next coarser grid, // using a trilinear Finite Element projection: s = r' = P r // // This implementation costs 20A + 4M per result, where // A and M denote the costs of Addition and Multiplication. // Note that this vectorizes, and is also fine for cache // based machines. 
//--------------------------------------------------------------------- static void rprj3(void *or, int m1k, int m2k, int m3k, void *os, int m1j, int m2j, int m3j, int k) { double (*r)[m2k][m1k] = (double (*)[m2k][m1k])or; double (*s)[m2j][m1j] = (double (*)[m2j][m1j])os; int j3, j2, j1, i3, i2, i1, d1, d2, d3, j; double x1[M], y1[M], x2, y2; if (timeron) timer_start(T_rprj3); if (m1k == 3) { d1 = 2; } else { d1 = 1; } if (m2k == 3) { d2 = 2; } else { d2 = 1; } if (m3k == 3) { d3 = 2; } else { d3 = 1; } #pragma omp parallel for default(shared) \ private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2) for (j3 = 1; j3 < m3j-1; j3++) { i3 = 2*j3-d3; for (j2 = 1; j2 < m2j-1; j2++) { i2 = 2*j2-d2; for (j1 = 1; j1 < m1j; j1++) { i1 = 2*j1-d1; x1[i1] = r[i3+1][i2 ][i1] + r[i3+1][i2+2][i1] + r[i3 ][i2+1][i1] + r[i3+2][i2+1][i1]; y1[i1] = r[i3 ][i2 ][i1] + r[i3+2][i2 ][i1] + r[i3 ][i2+2][i1] + r[i3+2][i2+2][i1]; } for (j1 = 1; j1 < m1j-1; j1++) { i1 = 2*j1-d1; y2 = r[i3 ][i2 ][i1+1] + r[i3+2][i2 ][i1+1] + r[i3 ][i2+2][i1+1] + r[i3+2][i2+2][i1+1]; x2 = r[i3+1][i2 ][i1+1] + r[i3+1][i2+2][i1+1] + r[i3 ][i2+1][i1+1] + r[i3+2][i2+1][i1+1]; s[j3][j2][j1] = 0.5 * r[i3+1][i2+1][i1+1] + 0.25 * (r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2) + 0.125 * (x1[i1] + x1[i1+2] + y2) + 0.0625 * (y1[i1] + y1[i1+2]); } } } if (timeron) timer_stop(T_rprj3); j = k-1; comm3(s, m1j, m2j, m3j, j); if (debug_vec[0] >= 1) { rep_nrm(s, m1j, m2j, m3j, " rprj3", k-1); } if (debug_vec[4] >= k) { showall(s, m1j, m2j, m3j); } } //--------------------------------------------------------------------- // interp adds the trilinear interpolation of the correction // from the coarser grid to the current approximation: u = u + Qu' // // Observe that this implementation costs 16A + 4M, where // A and M denote the costs of Addition and Multiplication. // Note that this vectorizes, and is also fine for cache // based machines. 
// Vector machines may get slightly better
// performance however, with 8 separate "do i1" loops, rather than 4.
//---------------------------------------------------------------------
// interp: prolongate the coarse correction z (mm1 x mm2 x mm3) and add
// it to the fine-grid approximation u (n1 x n2 x n3).  The general
// branch handles grids larger than 3 in every dimension; the else
// branch handles degenerate (3-point) dimensions with the d/t offsets.
static void interp(void *oz, int mm1, int mm2, int mm3,
                   void *ou, int n1, int n2, int n3, int k)
{
  double (*z)[mm2][mm1] = (double (*)[mm2][mm1])oz;
  double (*u)[n2][n1] = (double (*)[n2][n1])ou;

  int i3, i2, i1, d1, d2, d3, t1, t2, t3;

  // note that m = 1037 in globals.h but for this only need to be
  // 535 to handle up to 1024^3
  //  integer m
  //  parameter( m=535 )
  // Per-thread scratch rows (private in the parallel loop below).
  double z1[M], z2[M], z3[M];

  if (timeron) timer_start(T_interp);
  if (n1 != 3 && n2 != 3 && n3 != 3) {
    #pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)
    for (i3 = 0; i3 < mm3-1; i3++) {
      for (i2 = 0; i2 < mm2-1; i2++) {
        // Pre-sum coarse neighbors along i2 (z1), i3 (z2) and the
        // i2-i3 diagonal (z3) for the whole i1 row.
        for (i1 = 0; i1 < mm1; i1++) {
          z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
          z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
          z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
        }

        // Four update loops: one per parity combination of (i2,i3) on
        // the fine grid; each handles even/odd i1 within its body.
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
                              + z[i3][i2][i1];
          u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
                                + 0.5 * (z[i3][i2][i1+1] + z[i3][i2][i1]);
        }
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
                                + 0.5 * z1[i1];
          u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
                                  + 0.25 * (z1[i1] + z1[i1+1]);
        }
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
                                + 0.5 * z2[i1];
          u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
                                  + 0.25 * (z2[i1] + z2[i1+1]);
        }
        for (i1 = 0; i1 < mm1-1; i1++) {
          u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
                                  + 0.25 * z3[i1];
          u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
                                    + 0.125 * (z3[i1] + z3[i1+1]);
        }
      }
    }
  } else {
    // Degenerate case: a dimension of size 3 shifts the loop base (d)
    // and the fine-grid target index (t).
    if (n1 == 3) {
      d1 = 2;
      t1 = 1;
    } else {
      d1 = 1;
      t1 = 0;
    }

    if (n2 == 3) {
      d2 = 2;
      t2 = 1;
    } else {
      d2 = 1;
      t2 = 0;
    }

    if (n3 == 3) {
      d3 = 2;
      t3 = 1;
    } else {
      d3 = 1;
      t3 = 0;
    }

    #pragma omp parallel default(shared) private(i1,i2,i3)
    {
    // First workshare: contributions landing on fine planes 2*i3-d3-1.
    #pragma omp for
    for (i3 = d3; i3 <= mm3-1; i3++) {
      for (i2 = d2; i2 <= mm2-1; i2++) {
        for (i1 = d1; i1 <= mm1-1; i1++) {
          u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =
            u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]
            + z[i3-1][i2-1][i1-1];
        }
        for (i1 = 1; i1 <= mm1-1; i1++) {
          u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =
            u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]
            + 0.5 * (z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
        }
      }
      for (i2 = 1; i2 <= mm2-1; i2++) {
        for (i1 = d1; i1 <= mm1-1; i1++) {
          u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =
            u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]
            + 0.5 * (z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
        }
        for (i1 = 1; i1 <= mm1-1; i1++) {
          u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =
            u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]
            + 0.25 * (z[i3-1][i2][i1] + z[i3-1][i2-1][i1]
                    + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
        }
      }
    }

    // Second workshare: contributions landing on fine planes 2*i3-t3-1.
    #pragma omp for nowait
    for (i3 = 1; i3 <= mm3-1; i3++) {
      for (i2 = d2; i2 <= mm2-1; i2++) {
        for (i1 = d1; i1 <= mm1-1; i1++) {
          u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =
            u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]
            + 0.5 * (z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1-1]);
        }
        for (i1 = 1; i1 <= mm1-1; i1++) {
          u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =
            u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]
            + 0.25 * (z[i3  ][i2-1][i1] + z[i3  ][i2-1][i1-1]
                    + z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
        }
      }
      for (i2 = 1; i2 <= mm2-1; i2++) {
        for (i1 = d1; i1 <= mm1-1; i1++) {
          u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =
            u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]
            + 0.25 * (z[i3  ][i2][i1-1] + z[i3  ][i2-1][i1-1]
                    + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
        }
        for (i1 = 1; i1 <= mm1-1; i1++) {
          u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =
            u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]
            + 0.125 * (z[i3  ][i2][i1  ] + z[i3  ][i2-1][i1  ]
                     + z[i3  ][i2][i1-1] + z[i3  ][i2-1][i1-1]
                     + z[i3-1][i2][i1  ] + z[i3-1][i2-1][i1  ]
                     + z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
        }
      }
    }
    } // end parallel
  }
  if (timeron) timer_stop(T_interp);

  if (debug_vec[0] >= 1) {
    rep_nrm(z, mm1, mm2, mm3, "z: inter", k-1);
    rep_nrm(u, n1, n2, n3, "u: inter", k);
  }

  if (debug_vec[5] >= k) {
    showall(z, mm1, mm2, mm3);
    showall(u, n1, n2, n3);
  }
}


//---------------------------------------------------------------------
// norm2u3
// evaluates approximations to the L2 norm and the
// uniform (or L-infinity or Chebyshev) norm, under the
// assumption that the boundaries are periodic or zero.  Add the
// boundaries in with half weight (quarter weight on the edges
// and eighth weight at the corners) for inhomogeneous boundaries.
//---------------------------------------------------------------------
static void norm2u3(void *or, int n1, int n2, int n3,
                    double *rnm2, double *rnmu,
                    int nx, int ny, int nz)
{
  double (*r)[n2][n1] = (double (*)[n2][n1])or;

  double s, a;
  int i3, i2, i1;

  double dn, max_rnmu;

  if (timeron) timer_start(T_norm2);
  // Total number of points on the full (undecomposed) grid.
  dn = 1.0*nx*ny*nz;

  s = 0.0;
  max_rnmu = 0.0;

  // Sum of squares goes through a reduction(+:s); the max is combined
  // by hand via a thread-local my_rnmu merged under a critical section.
  #pragma omp parallel default(shared) private(i1,i2,i3,a) reduction(+:s)
  {
    double my_rnmu = 0.0;
    #pragma omp for nowait
    for (i3 = 1; i3 < n3-1; i3++) {
      for (i2 = 1; i2 < n2-1; i2++) {
        for (i1 = 1; i1 < n1-1; i1++) {
          s = s + pow(r[i3][i2][i1], 2.0);
          a = fabs(r[i3][i2][i1]);
          my_rnmu = (a > my_rnmu) ? a : my_rnmu;
        }
      }
    }

    // Double-checked max update: the unprotected outer read of
    // max_rnmu only skips the critical section; the compare is
    // repeated inside it, so the final max is still correct.
    if (my_rnmu > max_rnmu) {
      #pragma omp critical
      max_rnmu = (my_rnmu > max_rnmu) ?  my_rnmu : max_rnmu;
    }
  } // end parallel

  *rnmu = max_rnmu;
  *rnm2 = sqrt(s / dn);
  if (timeron) timer_stop(T_norm2);
}


//---------------------------------------------------------------------
// report on norm
//---------------------------------------------------------------------
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk)
{
  double rnm2, rnmu;

  // Compute both norms at level kk and print them.
  norm2u3(u, n1, n2, n3, &rnm2, &rnmu, nx[kk], ny[kk], nz[kk]);
  printf(" Level%2d in %8s: norms =%21.14E%21.14E\n", kk, title, rnm2, rnmu);
}


//---------------------------------------------------------------------
// comm3 organizes the communication on all borders
// (serial/OpenMP version: copies the opposite interior plane into each
// boundary plane, i.e. periodic wrap-around in all three dimensions).
//---------------------------------------------------------------------
static void comm3(void *ou, int n1, int n2, int n3, int kk)
{
  double (*u)[n2][n1] = (double (*)[n2][n1])ou;

  int i1, i2, i3;
  if (timeron) timer_start(T_comm3);
  #pragma omp parallel default(shared) private(i1,i2,i3)
  {
    #pragma omp for
    for (i3 = 1; i3 < n3-1; i3++) {
      // Wrap in the i1 direction for this i3 plane.
      for (i2 = 1; i2 < n2-1; i2++) {
        u[i3][i2][   0] = u[i3][i2][n1-2];
        u[i3][i2][n1-1] = u[i3][i2][   1];
      }
      // }

      // for (i3 = 1; i3 < n3-1; i3++) {
      // Wrap in the i2 direction (fused into the same i3 loop; the
      // commented braces above show the original two-loop form).
      for (i1 = 0; i1 < n1; i1++) {
        u[i3][   0][i1] = u[i3][n2-2][i1];
        u[i3][n2-1][i1] = u[i3][   1][i1];
      }
    }

    // Wrap in the i3 direction; must run after the loops above so the
    // copied planes already carry correct i1/i2 boundaries.
    #pragma omp for nowait
    for (i2 = 0; i2 < n2; i2++) {
      for (i1 = 0; i1 < n1; i1++) {
        u[   0][i2][i1] = u[n3-2][i2][i1];
        u[n3-1][i2][i1] = u[   1][i2][i1];
      }
    }
  } // end parallel
  if (timeron) timer_stop(T_comm3);
}


//---------------------------------------------------------------------
// zran3 loads +1 at ten randomly chosen points,
// loads -1 at a different ten random points,
// and zero elsewhere.
//---------------------------------------------------------------------
// zran3: fill z with pseudo-random values (NPB randlc/vranlc stream),
// then keep only the mm largest (+1) and mm smallest (-1) entries and
// zero everything else.  The candidate search and the global merge are
// parallelized; the merge uses an ordered workshare so ties resolve
// identically regardless of thread count.
//---------------------------------------------------------------------
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;

  int i0, mm0, mm1;
  int i1, i2, i3, d1, e1, e2, e3;
  double xx, x0, x1, a1, a2, ai;

  const int mm = 10;
  const double a = pow(5.0, 13.0);
  const double x = 314159265.0;
  // ten[i][1]: i-th largest candidates; ten[i][0]: i-th smallest.
  // j1/j2/j3 hold the matching coordinates; jg the global winners.
  double ten[mm][2], best0, best1;
  int i, j1[mm][2], j2[mm][2], j3[mm][2];
  int jg[4][mm][2];

  double rdummy;
  int myid, num_threads;

  // Seed-advance multipliers: one row (a1) and one plane (a2) ahead.
  a1 = power(a, nx1);
  a2 = power(a, nx1*ny1);

  zero3(z, n1, n2, n3);

  // Linear offset of this process's subgrid origin in the full grid
  // (is*/ie* are globals set by the decomposition).
  i = is1-2+nx1*(is2-2+ny1*(is3-2));

  ai = power(a, i);
  d1 = ie1 - is1 + 1;
  e1 = ie1 - is1 + 2;
  e2 = ie2 - is2 + 2;
  e3 = ie3 - is3 + 2;
  x0 = x;
  rdummy = randlc(&x0, ai);

  //---------------------------------------------------------------------
  // save the starting seeds for the following loop
  //---------------------------------------------------------------------
  for (i3 = 1; i3 < e3; i3++) {
    starts[i3] = x0;
    rdummy = randlc(&x0, a2);
  }

  //---------------------------------------------------------------------
  // fill array
  //---------------------------------------------------------------------
  // Each i3 plane restarts from its saved seed, so planes can be
  // generated independently in parallel with reproducible values.
  #pragma omp parallel for default(shared) private(i2,i3,x1,xx,rdummy) \
    shared(e2,e3,d1,a1)
  for (i3 = 1; i3 < e3; i3++) {
    x1 = starts[i3];
    for (i2 = 1; i2 < e2; i2++) {
      xx = x1;
      vranlc(d1, &xx, a, &(z[i3][i2][1]));
      rdummy = randlc(&x1, a1);
    }
  }

  //---------------------------------------------------------------------
  // comm3(z,n1,n2,n3);
  // showall(z,n1,n2,n3);
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // each thread looks for twenty candidates
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(i,i0,i1,i2,i3,j1,j2,j3,ten, \
                                               myid,num_threads) \
                       shared(best0,best1,n1,n2,n3)
  {
    // Initialize per-thread candidate lists: column 1 tracks maxima
    // (sentinel 0.0), column 0 tracks minima (sentinel 1.0).
    for (i = 0; i < mm; i++) {
      ten[i][1] = 0.0;
      j1[i][1] = 0;
      j2[i][1] = 0;
      j3[i][1] = 0;
      ten[i][0] = 1.0;
      j1[i][0] = 0;
      j2[i][0] = 0;
      j3[i][0] = 0;
    }

    #pragma omp for
    for (i3 = 1; i3 < n3-1; i3++) {
      double (*zi3)[n1] = z[i3];
      for (i2 = 1; i2 < n2-1; i2++) {
        for (i1 = 1; i1 < n1-1; i1++) {
          // ten[0][*] is always the weakest entry in the sorted list;
          // replace it and re-sort with bubble() when beaten.
          if (zi3[i2][i1] > ten[0][1]) {
            ten[0][1] = zi3[i2][i1];
            j1[0][1] = i1;
            j2[0][1] = i2;
            j3[0][1] = i3;
            bubble(ten, j1, j2, j3, mm, 1);
          }
          if (zi3[i2][i1] < ten[0][0]) {
            ten[0][0] = zi3[i2][i1];
            j1[0][0] = i1;
            j2[0][0] = i2;
            j3[0][0] = i3;
            bubble(ten, j1, j2, j3, mm, 0);
          }
        }
      }
    }

    //---------------------------------------------------------------------
    // Now which of these are globally best?
    //---------------------------------------------------------------------
    i1 = mm - 1;
    i0 = mm - 1;

    myid = 0;
    myid = omp_get_thread_num();
    num_threads = omp_get_num_threads();

    for (i = mm - 1; i >= 0; i--) {
      // ... ORDERED access is required here for sequential consistency
      // ... in case that two values are identical.
      // ... Since an "ORDERED" section is only defined in OpenMP 2,
      // ... we use a dummy loop to emulate ordered access in OpenMP 1.x.
      #pragma omp master
      {
        best1 = 0.0;
        best0 = 1.0;
      }
      // Dummy loop over thread ids: the ordered section visits threads
      // in a fixed order, so the winning thread for rank i is
      // deterministic even when candidate values tie.
      #pragma omp for ordered schedule(static)
      for (i2 = 1; i2 <= num_threads; i2++) {
        #pragma omp ordered
        {
          if (ten[i1][1] > best1) {
            best1 = ten[i1][1];
            jg[0][i][1] = myid;
          }
          if (ten[i0][0] < best0) {
            best0 = ten[i0][0];
            jg[0][i][0] = myid;
          }
        }
      }

      // Only the winning thread records its coordinates and retires
      // that candidate from its local list.
      if (myid == jg[0][i][1]) {
        jg[1][i][1] = j1[i1][1];
        jg[2][i][1] = j2[i1][1];
        jg[3][i][1] = j3[i1][1];
        i1 = i1-1;
      }
      if (myid == jg[0][i][0]) {
        jg[1][i][0] = j1[i0][0];
        jg[2][i][0] = j2[i0][0];
        jg[3][i][0] = j3[i0][0];
        i0 = i0-1;
      }
    }
  } // end parallel

  // mm1 = i1+1;
  // mm0 = i0+1;
  // Single-process version keeps all mm winners from each list.
  mm1 = 0;
  mm0 = 0;

/*
  int cnt = 0;
  printf(" \n");
  printf(" negative charges at\n");
  for (i = 0; i < mm; i++) {
    printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf(" positive charges at\n");
  for (i = 0; i < mm; i++) {
    printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf(" small random numbers were\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %15.8E", ten[i][0]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf(" and they were found on processor number\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %4d", jg[0][i][0]);
    if (++cnt % 10 == 0) printf("\n");
  }

  cnt = 0;
  printf(" large random numbers were\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %15.8E", ten[i][1]);
    if (++cnt % 5 == 0) printf("\n");
  }

  cnt = 0;
  printf(" and they were found on processor number\n");
  for (i = mm-1; i >= 0; i--) {
    printf(" %4d", jg[0][i][1]);
    if (++cnt % 10 == 0) printf("\n");
  }
*/

  // Clear the array, then plant the surviving charges.
  #pragma omp parallel for default(shared) private(i1,i2,i3)
  for (i3 = 0; i3 < n3; i3++) {
    for (i2 = 0; i2 < n2; i2++) {
      for (i1 = 0; i1 < n1; i1++) {
        z[i3][i2][i1] = 0.0;
      }
    }
  }
  for (i = mm-1; i >= mm0; i--) {
    z[jg[3][i][0]][jg[2][i][0]][jg[1][i][0]] = -1.0;
  }
  for (i = mm-1; i >= mm1; i--) {
    z[jg[3][i][1]][jg[2][i][1]][jg[1][i][1]] = +1.0;
  }
  comm3(z, n1, n2, n3, k);

  //---------------------------------------------------------------------
  // showall(z,n1,n2,n3);
  //---------------------------------------------------------------------
}


// showall: print a small corner of the grid (at most 18x14x18) for
// debugging; output is column-major per plane, plane separators dashed.
static void showall(void *oz, int n1, int n2, int n3)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;

  int i1, i2, i3;
  int m1, m2, m3;

  m1 = min(n1, 18);
  m2 = min(n2, 14);
  m3 = min(n3, 18);

  printf(" \n");
  for (i3 = 0; i3 < m3; i3++) {
    for (i1 = 0; i1 < m1; i1++) {
      for (i2 = 0; i2 < m2; i2++) {
        printf("%6.3f", z[i3][i2][i1]);
      }
      printf("\n");
    }
    printf(" - - - - - - - \n");
  }
  printf(" \n");
}


//---------------------------------------------------------------------
// power raises an integer, disguised as a double
// precision real, to an integer power
// (binary exponentiation driven through randlc, so the result is the
// seed reached after n steps of the NPB linear congruential stream).
//---------------------------------------------------------------------
static double power(double a, int n)
{
  double aj;
  int nj;
  double rdummy;
  double power;

  power = 1.0;
  nj = n;
  aj = a;

  while (nj != 0) {
    if ((nj % 2) == 1) rdummy = randlc(&power, aj);
    rdummy = randlc(&aj, aj);  // square the multiplier
    nj = nj/2;
  }

  return power;
}


//---------------------------------------------------------------------
// bubble does a bubble sort in direction dir
// (one insertion pass: after ten[0][ind] is replaced, sift it toward
// its sorted position and stop at the first already-ordered pair;
// ind==1 keeps ascending maxima, ind==0 keeps descending minima).
//---------------------------------------------------------------------
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
                   int m, int ind)
{
  double temp;
  int i, j_temp;

  if (ind == 1) {
    for (i = 0; i < m-1; i++) {
      if (ten[i][ind] > ten[i+1][ind]) {
        temp = ten[i+1][ind];
        ten[i+1][ind] = ten[i][ind];
        ten[i][ind] = temp;

        j_temp = j1[i+1][ind];
        j1[i+1][ind] = j1[i][ind];
        j1[i][ind] = j_temp;

        j_temp = j2[i+1][ind];
        j2[i+1][ind] = j2[i][ind];
        j2[i][ind] = j_temp;

        j_temp = j3[i+1][ind];
        j3[i+1][ind] = j3[i][ind];
        j3[i][ind] = j_temp;
      } else {
        return;
      }
    }
  } else {
    for (i = 0; i < m-1; i++) {
      if (ten[i][ind] < ten[i+1][ind]) {
        temp = ten[i+1][ind];
        ten[i+1][ind] = ten[i][ind];
        ten[i][ind] = temp;

        j_temp = j1[i+1][ind];
        j1[i+1][ind] = j1[i][ind];
        j1[i][ind] = j_temp;

        j_temp = j2[i+1][ind];
        j2[i+1][ind] = j2[i][ind];
        j2[i][ind] = j_temp;

        j_temp = j3[i+1][ind];
        j3[i+1][ind] = j3[i][ind];
        j3[i][ind] = j_temp;
      } else {
        return;
      }
    }
  }
}


// zero3: set every element of the n1 x n2 x n3 grid to zero.
static void zero3(void *oz, int n1, int n2, int n3)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;

  int i1, i2, i3;
  #pragma omp parallel for default(shared) private(i1,i2,i3)
  for (i3 = 0; i3 < n3; i3++) {
    for (i2 = 0; i2 < n2; i2++) {
      for (i1 = 0; i1 < n1; i1++) {
        z[i3][i2][i1] = 0.0;
      }
    }
  }
}
cancel-for-2.c
/* { dg-do run } */ /* { dg-set-target-env-var OMP_CANCELLATION "true" } */ #include <stdlib.h> #include <omp.h> __attribute__((noinline, noclone)) int foo (int *x) { int v = 0, w = 0; #pragma omp parallel num_threads (32) shared (v, w) { int i; #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[0]) abort (); } #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[1]) #pragma omp atomic v++; } #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[2]) #pragma omp atomic w += 8; } #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[3]) #pragma omp atomic v += 2; } } if (v != 3000 || w != 0) abort (); #pragma omp parallel num_threads (32) shared (v, w) { int i; /* None of these cancel directives should actually cancel anything, but the compiler shouldn't know that and thus should use cancellable barriers at the end of all the workshares. */ #pragma omp cancel parallel if (omp_get_thread_num () == 1 && x[4]) #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[0]) abort (); } #pragma omp cancel parallel if (omp_get_thread_num () == 2 && x[4]) #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[1]) #pragma omp atomic v++; } #pragma omp cancel parallel if (omp_get_thread_num () == 3 && x[4]) #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[2]) #pragma omp atomic w += 8; } #pragma omp cancel parallel if (omp_get_thread_num () == 4 && x[4]) #pragma omp for for (i = 0; i < 1000; ++i) { #pragma omp cancel for if (x[3]) #pragma omp atomic v += 2; } #pragma omp cancel parallel if (omp_get_thread_num () == 5 && x[4]) } if (v != 6000 || w != 0) abort (); return 0; } int main () { int x[] = { 1, 0, 1, 0, 0 }; if (omp_get_cancellation ()) foo (x); return 0; }
dSchCompUdt-2Ddynamic.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /*! @file * \brief This file contains the main loop of pdgstrf which involves rank k * update of the Schur complement. * Uses 2D partitioning for the scatter phase. * * <pre> * -- Distributed SuperLU routine (version 5.4) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 1, 2014 * * Modified: * September 14, 2017 * - First gather U-panel, then depending on "ldu" (excluding leading zeros), * gather only trailing columns of the L-panel corresponding to the nonzero * of U-rows. * - Padding zeros for nice dimensions of GEMM. * * June 1, 2018 add parallel AWPM pivoting; add back arrive_at_ublock() */ #define SCHEDULE_STRATEGY guided /* * Buffers: * [ lookAhead_L_buff | Remain_L_buff ] : stores the gathered L-panel * (A matrix in C := A*B ) * bigU : stores the U-panel (B matrix in C := A*B) * bigV : stores the block GEMM result (C matrix in C := A*B) */ if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */ int cum_nrow = 0; /* cumulative number of nonzero rows in L(:,k) */ int temp_nbrow; /* nonzero rows in current block L(i,k) */ lptr = lptr0; luptr = luptr0; int Lnbrow, Rnbrow; /* number of nonzero rows in look-ahead window, and remaining part. */ /******************************************************************* * Separating L blocks into the top part within look-ahead window * and the remaining ones. *******************************************************************/ int lookAheadBlk=0, RemainBlk=0; tt_start = SuperLU_timer_(); /* Sherry -- can this loop be threaded?? */ /* Loop through all blocks in L(:,k) to set up pointers to the start * of each block in the data arrays. 
* - lookAheadFullRow[i] := number of nonzero rows from block 0 to i * - lookAheadStRow[i] := number of nonzero rows before block i * - lookAhead_lptr[i] := point to the start of block i in L's index[] * - (ditto Remain_Info[i]) */ for (int i = 0; i < nlb; ++i) { ib = lsub[lptr]; /* Block number of L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ int look_up_flag = 1; /* assume ib is outside look-up window */ for (int j = k0+1; j < SUPERLU_MIN (k0 + num_look_aheads+2, nsupers ); ++j) { if ( ib == perm_c_supno[j] ) { look_up_flag = 0; /* flag ib within look-up window */ break; /* Sherry -- can exit the loop?? */ } } if ( look_up_flag == 0 ) { /* ib is within look-up window */ if (lookAheadBlk==0) { lookAheadFullRow[lookAheadBlk] = temp_nbrow; } else { lookAheadFullRow[lookAheadBlk] = temp_nbrow + lookAheadFullRow[lookAheadBlk-1]; } lookAheadStRow[lookAheadBlk] = cum_nrow; lookAhead_lptr[lookAheadBlk] = lptr; lookAhead_ib[lookAheadBlk] = ib; lookAheadBlk++; } else { /* ib is not in look-up window */ if ( RemainBlk==0 ) { Remain_info[RemainBlk].FullRow = temp_nbrow; } else { Remain_info[RemainBlk].FullRow = temp_nbrow + Remain_info[RemainBlk-1].FullRow; } RemainStRow[RemainBlk] = cum_nrow; // Remain_lptr[RemainBlk] = lptr; Remain_info[RemainBlk].lptr = lptr; // Remain_ib[RemainBlk] = ib; Remain_info[RemainBlk].ib = ib; RemainBlk++; } cum_nrow += temp_nbrow; lptr += LB_DESCRIPTOR; /* Skip descriptor. */ lptr += temp_nbrow; /* Move to next block */ luptr += temp_nbrow; } /* for i ... set up pointers for all blocks in L(:,k) */ lptr = lptr0; luptr = luptr0; /* leading dimension of L look-ahead buffer, same as Lnbrow */ //int LDlookAhead_LBuff = lookAheadBlk==0 ? 0 :lookAheadFullRow[lookAheadBlk-1]; Lnbrow = lookAheadBlk==0 ? 0 : lookAheadFullRow[lookAheadBlk-1]; /* leading dimension of L remaining buffer, same as Rnbrow */ //int LDRemain_LBuff = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow; Rnbrow = RemainBlk==0 ? 
0 : Remain_info[RemainBlk-1].FullRow; /* assert( cum_nrow == (LDlookAhead_LBuff + LDRemain_LBuff) );*/ /* Piyush fix */ //int LDlookAhead_LBuff = lookAheadBlk==0? 0 : lookAheadFullRow[lookAheadBlk-1]; nbrow = Lnbrow + Rnbrow; /* total number of rows in L */ LookAheadRowSepMOP += 2*knsupc*(nbrow); /*********************************************** * Gather U blocks (AFTER LOOK-AHEAD WINDOW) * ***********************************************/ tt_start = SuperLU_timer_(); if ( nbrow > 0 ) { /* L(:,k) is not empty */ /* * Counting U blocks */ ldu = 0; /* Calculate ldu for U(k,:) after look-ahead window. */ ncols = 0; /* Total number of nonzero columns in U(k,:) */ int temp_ncols = 0; /* jj0 contains the look-ahead window that was updated in dlook_ahead_update.c. Now the search can continue from that point, not to start from block 0. */ #if 0 // Sherry comment out 5/21/208 /* Save pointers at location right after look-ahead window for later restart. */ iukp0 = iukp; rukp0 = rukp; #endif /* if ( iam==0 ) printf("--- k0 %d, k %d, jj0 %d, nub %d\n", k0, k, jj0, nub);*/ /* * Loop through all blocks in U(k,:) to set up pointers to the start * of each block in the data arrays, store them in Ublock_info[j] * for block U(k,j). */ for (j = jj0; j < nub; ++j) { /* jj0 starts after look-ahead window. */ temp_ncols = 0; #if 1 /* Cannot remove following call, since perm_u != Identity */ arrive_at_ublock( j, &iukp, &rukp, &jb, &ljb, &nsupc, iukp0, rukp0, usub, perm_u, xsup, grid ); #else jb = usub[iukp]; /* ljb = LBj (jb, grid); Local block number of U(k,j). */ nsupc = SuperSize(jb); iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */ #endif Ublock_info[j].iukp = iukp; Ublock_info[j].rukp = rukp; Ublock_info[j].jb = jb; /* if ( iam==0 ) printf("j %d: Ublock_info[j].iukp %d, Ublock_info[j].rukp %d," "Ublock_info[j].jb %d, nsupc %d\n", j, Ublock_info[j].iukp, Ublock_info[j].rukp, Ublock_info[j].jb, nsupc); */ /* Prepare to call GEMM. 
*/ jj = iukp; for (; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { ++temp_ncols; if ( segsize > ldu ) ldu = segsize; } } Ublock_info[j].full_u_cols = temp_ncols; ncols += temp_ncols; #if 0 // Sherry comment out 5/31/2018 */ /* Jump number of nonzeros in block U(k,jj); Move to block U(k,j+1) in nzval[] array. */ rukp += usub[iukp - 1]; iukp += nsupc; #endif } /* end for j ... compute ldu & ncols */ /* Now doing prefix sum on full_u_cols. * After this, full_u_cols is the number of nonzero columns * from block 0 to block j. */ for ( j = jj0+1; j < nub; ++j) { Ublock_info[j].full_u_cols += Ublock_info[j-1].full_u_cols; } /* Padding zeros to make {m,n,k} multiple of vector length. */ jj = 8; //n; if (gemm_padding > 0 && Rnbrow > jj && ncols > jj && ldu > jj) { gemm_m_pad = Rnbrow + (Rnbrow % GEMM_PADLEN); gemm_n_pad = ncols + (ncols % GEMM_PADLEN); //gemm_n_pad = ncols; //gemm_k_pad = ldu + (ldu % GEMM_PADLEN); gemm_k_pad = ldu; for (i = Rnbrow; i < gemm_m_pad; ++i) // padding A matrix for (j = 0; j < gemm_k_pad; ++j) Remain_L_buff[i + j*gemm_m_pad] = zero; for (i = 0; i < Rnbrow; ++i) for (j = ldu; j < gemm_k_pad; ++j) Remain_L_buff[i + j*gemm_m_pad] = zero; for (i = ldu; i < gemm_k_pad; ++i) // padding B matrix for (j = 0; j < gemm_n_pad; ++j) bigU[i + j*gemm_k_pad] = zero; for (i = 0; i < ldu; ++i) for (j = ncols; j < gemm_n_pad; ++j) bigU[i + j*gemm_k_pad] = zero; } else { gemm_m_pad = Rnbrow; gemm_n_pad = ncols; gemm_k_pad = ldu; } tempu = bigU; /* buffer the entire row block U(k,:) */ /* Gather U(k,:) into buffer bigU[] to prepare for GEMM */ #ifdef _OPENMP #pragma omp parallel for firstprivate(iukp, rukp) \ private(j,tempu, jb, nsupc,ljb,segsize, lead_zero, jj, i) \ default (shared) schedule(SCHEDULE_STRATEGY) #endif for (j = jj0; j < nub; ++j) { /* jj0 starts after look-ahead window. 
*/ if (j==jj0) tempu = bigU; //else tempu = bigU + ldu * Ublock_info[j-1].full_u_cols; else tempu = bigU + gemm_k_pad * Ublock_info[j-1].full_u_cols; /* == processing each of the remaining columns in parallel == */ #if 0 /* Can remove following call, since search was already done. */ arrive_at_ublock(j, &iukp, &rukp, &jb, &ljb, &nsupc, iukp0, rukp0, usub,perm_u, xsup, grid); #else iukp = Ublock_info[j].iukp; rukp = Ublock_info[j].rukp; jb = Ublock_info[j].jb; nsupc = SuperSize (jb ); #endif /* Copy from U(k,j) to tempu[], padding zeros. */ for (jj = iukp; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { lead_zero = ldu - segsize; for (i = 0; i < lead_zero; ++i) tempu[i] = zero; //tempu += lead_zero; #if (_OPENMP>=201307) #pragma omp simd #endif for (i = 0; i < segsize; ++i) tempu[i+lead_zero] = uval[rukp+i]; rukp += segsize; tempu += gemm_k_pad; } } } /* parallel for j = jj0 .. nub */ #if 0 if (ldu==0) printf("[%d] .. k0 %d, before updating: ldu %d, Lnbrow %d, Rnbrow %d, ncols %d\n",iam,k0,ldu,Lnbrow,Rnbrow, ncols); fflush(stdout); #endif GatherMOP += 2*ldu*ncols; } /* end if (nbrow>0), end gather U blocks */ GatherUTimer += SuperLU_timer_() - tt_start; int jj_cpu = nub; /* limit between CPU and GPU */ int thread_id; /*tempv = bigV;*/ /********************** * Gather L blocks * **********************/ tt_start = SuperLU_timer_(); /* Loop through the look-ahead blocks to copy Lval into the buffer */ #ifdef _OPENMP #pragma omp parallel for private(j,jj,tempu,tempv) default (shared) #endif for (i = 0; i < lookAheadBlk; ++i) { int StRowDest, temp_nbrow; if ( i==0 ) { StRowDest = 0; temp_nbrow = lookAheadFullRow[0]; } else { StRowDest = lookAheadFullRow[i-1]; temp_nbrow = lookAheadFullRow[i]-lookAheadFullRow[i-1]; } int StRowSource = lookAheadStRow[i]; /* Now copying one block into L lookahead buffer */ /* #pragma omp parallel for (gives slow down) */ // for (int j = 0; j < knsupc; ++j) { for (j = knsupc-ldu; j < knsupc; ++j) { /* skip leading 
columns corresponding to zero U rows */ #if 1 /* Better let compiler generate memcpy or vectorized code. */ //tempu = &lookAhead_L_buff[StRowDest + j*LDlookAhead_LBuff]; //tempu = &lookAhead_L_buff[StRowDest + j * Lnbrow]; tempu = &lookAhead_L_buff[StRowDest + (j - (knsupc-ldu)) * Lnbrow]; tempv = &lusup[luptr+j*nsupr + StRowSource]; #if (_OPENMP>=201307) #pragma omp simd #endif for (jj = 0; jj < temp_nbrow; ++jj) tempu[jj] = tempv[jj]; #else //memcpy(&lookAhead_L_buff[StRowDest + j*LDlookAhead_LBuff], memcpy(&lookAhead_L_buff[StRowDest + (j - (knsupc-ldu)) * Lnbrow], &lusup[luptr+j*nsupr + StRowSource], temp_nbrow * sizeof(double) ); #endif } /* end for j ... */ } /* parallel for i ... gather Lval blocks from lookahead window */ /* Loop through the remaining blocks to copy Lval into the buffer */ #ifdef _OPENMP #pragma omp parallel for private(i,j,jj,tempu,tempv) default (shared) \ schedule(SCHEDULE_STRATEGY) #endif for (int i = 0; i < RemainBlk; ++i) { int StRowDest, temp_nbrow; if ( i==0 ) { StRowDest = 0; temp_nbrow = Remain_info[0].FullRow; } else { StRowDest = Remain_info[i-1].FullRow; temp_nbrow = Remain_info[i].FullRow - Remain_info[i-1].FullRow; } int StRowSource = RemainStRow[i]; /* Now copying a block into L remaining buffer */ // #pragma omp parallel for (gives slow down) // for (int j = 0; j < knsupc; ++j) { for (int j = knsupc-ldu; j < knsupc; ++j) { // printf("StRowDest %d Rnbrow %d StRowSource %d \n", StRowDest,Rnbrow ,StRowSource); #if 1 /* Better let compiler generate memcpy or vectorized code. 
*/ //tempu = &Remain_L_buff[StRowDest + j*LDRemain_LBuff]; //tempu = &Remain_L_buff[StRowDest + (j - (knsupc-ldu)) * Rnbrow]; tempu = &Remain_L_buff[StRowDest + (j - (knsupc-ldu)) * gemm_m_pad]; tempv = &lusup[luptr + j*nsupr + StRowSource]; #if (_OPENMP>=201307) #pragma omp simd #endif for (jj = 0; jj < temp_nbrow; ++jj) tempu[jj] = tempv[jj]; #else //memcpy(&Remain_L_buff[StRowDest + j*LDRemain_LBuff], memcpy(&Remain_L_buff[StRowDest + (j - (knsupc-ldu)) * gemm_m_pad], &lusup[luptr+j*nsupr + StRowSource], temp_nbrow * sizeof(double) ); #endif } /* end for j ... */ } /* parallel for i ... copy Lval into the remaining buffer */ tt_end = SuperLU_timer_(); GatherLTimer += tt_end - tt_start; /************************************************************************* * Perform GEMM (look-ahead L part, and remain L part) followed by Scatter *************************************************************************/ tempu = bigU; /* setting to the start of padded U(k,:) */ if ( Lnbrow>0 && ldu>0 && ncols>0 ) { /* Both L(:,k) and U(k,:) nonempty */ /*************************************************************** * Updating blocks in look-ahead window of the LU(look-ahead-rows,:) ***************************************************************/ /* Count flops for total GEMM calls */ ncols = Ublock_info[nub-1].full_u_cols; flops_t flps = 2.0 * (flops_t)Lnbrow * ldu * ncols; LookAheadScatterMOP += 3 * Lnbrow * ncols; /* scatter-add */ schur_flop_counter += flps; stat->ops[FACT] += flps; LookAheadGEMMFlOp += flps; #ifdef _OPENMP #pragma omp parallel default (shared) private(thread_id) { thread_id = omp_get_thread_num(); /* Ideally, should organize the loop as: for (j = 0; j < nub; ++j) { for (lb = 0; lb < lookAheadBlk; ++lb) { L(lb,k) X U(k,j) -> tempv[] } } But now, we use collapsed loop to achieve more parallelism. 
Total number of block updates is: (# of lookAheadBlk in L(:,k)) X (# of blocks in U(k,:)) */ int i = sizeof(int); int* indirect_thread = indirect + (ldt + CACHELINE/i) * thread_id; int* indirect2_thread = indirect2 + (ldt + CACHELINE/i) * thread_id; #pragma omp for \ private (nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \ schedule(dynamic) #else /* not use _OPENMP */ thread_id = 0; int* indirect_thread = indirect; int* indirect2_thread = indirect2; #endif /* Each thread is assigned one loop index ij, responsible for block update L(lb,k) * U(k,j) -> tempv[]. */ for (int ij = 0; ij < lookAheadBlk*(nub-jj0); ++ij) { /* jj0 starts after look-ahead window. */ int j = ij/lookAheadBlk + jj0; int lb = ij%lookAheadBlk; /* Getting U block U(k,j) information */ /* unsigned long long ut_start, ut_end; */ int_t rukp = Ublock_info[j].rukp; int_t iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize(jb); int ljb = LBj (jb, grid); /* destination column block */ int st_col; int ncols; /* Local variable counts only columns in the block */ if ( j > jj0 ) { /* jj0 starts after look-ahead window. */ ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols; st_col = Ublock_info[j-1].full_u_cols; } else { ncols = Ublock_info[j].full_u_cols; st_col = 0; } /* Getting L block L(i,k) information */ int_t lptr = lookAhead_lptr[lb]; int ib = lookAhead_ib[lb]; int temp_nbrow = lsub[lptr+1]; lptr += LB_DESCRIPTOR; int cum_nrow = (lb==0 ? 
0 : lookAheadFullRow[lb-1]); /* Block-by-block GEMM in look-ahead window */ #if 0 i = sizeof(double); double* tempv1 = bigV + thread_id * (ldt*ldt + CACHELINE/i); #else double* tempv1 = bigV + thread_id * (ldt*ldt); #endif #if ( PRNTlevel>= 1) if (thread_id == 0) tt_start = SuperLU_timer_(); gemm_max_m = SUPERLU_MAX(gemm_max_m, temp_nbrow); gemm_max_n = SUPERLU_MAX(gemm_max_n, ncols); gemm_max_k = SUPERLU_MAX(gemm_max_k, ldu); #endif #if defined (USE_VENDOR_BLAS) dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha, //&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow, &lookAhead_L_buff[cum_nrow], &Lnbrow, &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1); #else dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha, //&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow, &lookAhead_L_buff[cum_nrow], &Lnbrow, &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow); #endif #if (PRNTlevel>=1 ) if (thread_id == 0) { tt_end = SuperLU_timer_(); LookAheadGEMMTimer += tt_end - tt_start; tt_start = tt_end; } #endif if ( ib < jb ) { dscatter_u ( ib, jb, nsupc, iukp, xsup, klst, temp_nbrow, lptr, temp_nbrow, lsub, usub, tempv1, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { #if 0 //#ifdef USE_VTUNE __SSC_MARK(0x111);// start SDE tracing, note uses 2 underscores __itt_resume(); // start VTune, again use 2 underscores #endif dscatter_l ( ib, ljb, nsupc, iukp, xsup, klst, temp_nbrow, lptr, temp_nbrow, usub, lsub, tempv1, indirect_thread, indirect2_thread, Lrowind_bc_ptr, Lnzval_bc_ptr, grid ); #if 0 //#ifdef USE_VTUNE __itt_pause(); // stop VTune __SSC_MARK(0x222); // stop SDE tracing #endif } #if ( PRNTlevel>=1 ) if (thread_id == 0) LookAheadScatterTimer += SuperLU_timer_() - tt_start; #endif } /* end omp for ij = ... */ #ifdef _OPENMP } /* end omp parallel */ #endif } /* end if Lnbrow>0 ... look-ahead GEMM and scatter */ /*************************************************************** * Updating remaining rows and columns on CPU. 
***************************************************************/ ncols = jj_cpu==0 ? 0 : Ublock_info[jj_cpu-1].full_u_cols; if ( Rnbrow>0 && ldu>0 ) { /* There are still blocks remaining ... */ double flps = 2.0 * (double)Rnbrow * ldu * ncols; schur_flop_counter += flps; stat->ops[FACT] += flps; #if ( PRNTlevel>=1 ) RemainGEMM_flops += flps; gemm_max_m = SUPERLU_MAX(gemm_max_m, Rnbrow); gemm_max_n = SUPERLU_MAX(gemm_max_n, ncols); gemm_max_k = SUPERLU_MAX(gemm_max_k, ldu); tt_start = SuperLU_timer_(); /* printf("[%d] .. k0 %d, before large GEMM: %d-%d-%d, RemainBlk %d\n", iam, k0,Rnbrow,ldu,ncols,RemainBlk); fflush(stdout); assert( Rnbrow*ncols < bigv_size ); */ #endif /* calling aggregated large GEMM, result stored in bigV[]. */ #if defined (USE_VENDOR_BLAS) //dgemm_("N", "N", &Rnbrow, &ncols, &ldu, &alpha, dgemm_("N", "N", &gemm_m_pad, &gemm_n_pad, &gemm_k_pad, &alpha, //&Remain_L_buff[(knsupc-ldu)*Rnbrow], &Rnbrow, &Remain_L_buff[0], &gemm_m_pad, &bigU[0], &gemm_k_pad, &beta, bigV, &gemm_m_pad, 1, 1); #else //dgemm_("N", "N", &Rnbrow, &ncols, &ldu, &alpha, dgemm_("N", "N", &gemm_m_pad, &gemm_n_pad, &gemm_k_pad, &alpha, //&Remain_L_buff[(knsupc-ldu)*Rnbrow], &Rnbrow, &Remain_L_buff[0], &gemm_m_pad, &bigU[0], &gemm_k_pad, &beta, bigV, &gemm_m_pad); #endif #if ( PRNTlevel>=1 ) tt_end = SuperLU_timer_(); RemainGEMMTimer += tt_end - tt_start; #if ( PROFlevel>=1 ) //fprintf(fgemm, "%8d%8d%8d %16.8e\n", Rnbrow, ncols, ldu, // (tt_end - tt_start)*1e6); // time in microsecond //fflush(fgemm); gemm_stats[gemm_count].m = Rnbrow; gemm_stats[gemm_count].n = ncols; gemm_stats[gemm_count].k = ldu; gemm_stats[gemm_count++].microseconds = (tt_end - tt_start) * 1e6; #endif tt_start = SuperLU_timer_(); #endif #ifdef USE_VTUNE __SSC_MARK(0x111);// start SDE tracing, note uses 2 underscores __itt_resume(); // start VTune, again use 2 underscores #endif /* Scatter into destination block-by-block. 
*/ #ifdef _OPENMP #pragma omp parallel default(shared) private(thread_id) { thread_id = omp_get_thread_num(); /* Ideally, should organize the loop as: for (j = 0; j < jj_cpu; ++j) { for (lb = 0; lb < RemainBlk; ++lb) { L(lb,k) X U(k,j) -> tempv[] } } But now, we use collapsed loop to achieve more parallelism. Total number of block updates is: (# of RemainBlk in L(:,k)) X (# of blocks in U(k,:)) */ int i = sizeof(int); int* indirect_thread = indirect + (ldt + CACHELINE/i) * thread_id; int* indirect2_thread = indirect2 + (ldt + CACHELINE/i) * thread_id; #pragma omp for \ private (j,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \ schedule(dynamic) #else /* not use _OPENMP */ thread_id = 0; int* indirect_thread = indirect; int* indirect2_thread = indirect2; #endif /* Each thread is assigned one loop index ij, responsible for block update L(lb,k) * U(k,j) -> tempv[]. */ for (int ij = 0; ij < RemainBlk*(jj_cpu-jj0); ++ij) { /* jj_cpu := nub, jj0 starts after look-ahead window. */ int j = ij / RemainBlk + jj0; /* j-th block in U panel */ int lb = ij % RemainBlk; /* lb-th block in L panel */ /* Getting U block U(k,j) information */ /* unsigned long long ut_start, ut_end; */ int_t rukp = Ublock_info[j].rukp; int_t iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize(jb); int ljb = LBj (jb, grid); int st_col; int ncols; if ( j>jj0 ) { ncols = Ublock_info[j].full_u_cols - Ublock_info[j-1].full_u_cols; st_col = Ublock_info[j-1].full_u_cols; } else { ncols = Ublock_info[j].full_u_cols; st_col = 0; } /* Getting L block L(i,k) information */ int_t lptr = Remain_info[lb].lptr; int ib = Remain_info[lb].ib; int temp_nbrow = lsub[lptr+1]; lptr += LB_DESCRIPTOR; int cum_nrow = (lb==0 ? 0 : Remain_info[lb-1].FullRow); /* tempv1 points to block(i,j) in bigV : LDA == Rnbrow */ //double* tempv1 = bigV + (st_col * Rnbrow + cum_nrow); Sherry double* tempv1 = bigV + (st_col * gemm_m_pad + cum_nrow); /* Sherry */ // printf("[%d] .. 
before scatter: ib %d, jb %d, temp_nbrow %d, Rnbrow %d\n", iam, ib, jb, temp_nbrow, Rnbrow); fflush(stdout); /* Now scattering the block */ if ( ib < jb ) { dscatter_u ( ib, jb, nsupc, iukp, xsup, //klst, Rnbrow, /*** klst, temp_nbrow, Sherry */ klst, gemm_m_pad, /*** klst, temp_nbrow, Sherry */ lptr, temp_nbrow, /* row dimension of the block */ lsub, usub, tempv1, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { dscatter_l( ib, ljb, nsupc, iukp, xsup, //klst, temp_nbrow, Sherry klst, gemm_m_pad, /*** temp_nbrow, Sherry */ lptr, temp_nbrow, /* row dimension of the block */ usub, lsub, tempv1, indirect_thread, indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr, grid ); } } /* end omp for (int ij =...) */ #ifdef _OPENMP } /* end omp parallel region */ #endif #if ( PRNTlevel>=1 ) RemainScatterTimer += SuperLU_timer_() - tt_start; #endif #ifdef USE_VTUNE __itt_pause(); // stop VTune __SSC_MARK(0x222); // stop SDE tracing #endif } /* end if Rnbrow>0 ... update remaining block */ } /* end if L(:,k) and U(k,:) are not empty */
DRB045-doall1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdlib.h> /* Simplest one dimension array computation */ int a[100]; int main() { int i; #pragma omp target data map(tofrom: a[0:100]) { #pragma omp target parallel for for (i = 0; i < 100; i++) a[i] = i; } #pragma omp target data map(tofrom: a[0:100]) { #pragma omp target parallel for for (i = 0; i < 100; i++) a[i] = a[i] + 1; } for (i = 0; i < 100; i++) printf("%d\n", a[i]); return 0; }
test.c
// NOTE(review): OpenMP target-offload regression test.  It depends on helper
// macros from ../utilities (INIT_LOOP, ZERO_ARRAY, DUMP_SUCCESS, VERIFY,
// check_offloading) and on the TARGET_PARALLEL_FOR1..9 wrappers that
// "tpf_defines.h" regenerates from TARGET_PARALLEL_FOR_CLAUSES each time it
// is re-included below — presumably each numbered variant exercises a
// different combination of num_threads/schedule clauses (TODO confirm
// against tpf_defines.h).
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

#define TRIALS (1)

#define N (1024*3)

// Inputs: C[i] = 1, D[i] = i, E[i] = -i+1, so for every i:
//   C[i]+D[i] == i+1  and  D[i]+E[i] == 1,
// hence sum(A[i]+B[i]-1) over i in [0,N) == N/2*(N+1), the value VERIFY
// checks in most tests below.
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i+1;})

#define ZERO(X) ZERO_ARRAY(N, X)

// Emit one success marker per TARGET_PARALLEL_FOR variant for each thread
// count in (max_threads, gpu_threads] that this run skipped, so host and
// device runs produce the same number of output lines.
#define DUMP_SUCCESS9() { \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
DUMP_SUCCESS(gpu_threads-max_threads); \
}

//
// FIXME:
// Add support for 'shared', 'lastprivate'
//

int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];
  double S[N];
  double p[2];

  // Probe whether the target region actually offloaded: cpuExec != 0 means
  // we fell back to host execution, which supports fewer threads.
  int cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int gpu_threads = 224;
  int cpu_threads = 32;
  int max_threads = cpuExec ? cpu_threads : gpu_threads;

  INIT();

  //
  // Test: proc_bind clause
  //
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES proc_bind(master)
#include "tpf_defines.h"
  // Sweep every thread count from 0 (implementation default) to max_threads.
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES proc_bind(close)
#include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES proc_bind(spread)
#include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i]; \
    B[i] += D[i] + E[i]; \
    }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  //
  // Test: private, shared clauses on omp target parallel for.
  //
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
#include "tpf_defines.h"
  // FIXME: shared(a) where 'a' is an implicitly mapped scalar does not work.
  // FIXME: shared(A) private(A) does not generate correct results.
  // The +6 in the expected value is the initial p+q (2+4) read back after the
  // region: the privatized copies do not feed back into the originals.
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR2( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR3( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR4( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR5( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR6( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR7( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR8( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR9( double p = 2; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p = C[i] + D[i]; \
    q = D[i] + E[i]; \
    A[i] += p; \
    B[i] += q; \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  //
  // Test: firstprivate clause on omp target parallel for.
  //
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES firstprivate(p,q)
#include "tpf_defines.h"
  // p = -4 and q = 4 cancel (p+q == 0) and the in-region bumps at i == N-1
  // stay in the firstprivate copies, so the expected sum is unchanged.
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9( double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p; \
    B[i] += D[i] + E[i] + q; \
    if (i == N-1) { \
    p += 6; \
    q += 9; \
    } \
    }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

#if 0
FIXME
  //
  // Test: lastprivate clause on omp target parallel for.
  //
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES lastprivate(q)
#include "tpf_defines.h"
  // FIXME: modify to t=1 and in tpf_defines.h to use host after bug fix.
  // FIXME: variable is not private.
  for (int t = 2; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( double p[1]; \
    double q[1]; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p[0] = C[i] + D[i]; \
    q[0] = D[i] + E[i]; \
    A[i] = p[0]; \
    B[i] = q[0]; \
    }, { double tmp = p[0] + q[0]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], N+1+ N/2*(N+1)))
  }

FIXME: private of non-scalar does not work.
  //
  // Test: private clause on omp parallel for.
  //
#undef PARALLEL_FOR_CLAUSES
#define PARALLEL_FOR_CLAUSES private(p)
#include "tpf_defines.h"
  for (int t = 0; t <= 224; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL_FOR( p[0] = 2; p[1] = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    p[0] = C[i] + D[i]; \
    p[1] = D[i] + E[i]; \
    A[i] += p[0]; \
    B[i] += p[1]; \
    } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
  }

FIXME: private of non-scalar does not work.
  //
  // Test: firstprivate clause on omp parallel for.
  //
#undef PARALLEL_FOR_CLAUSES
#define PARALLEL_FOR_CLAUSES firstprivate(p)
#include "tpf_defines.h"
  for (int t = 0; t <= 224; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL_FOR( p[0] = -4; p[1] = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < N; i++) { \
    A[i] += C[i] + D[i] + p[0]; \
    B[i] += D[i] + E[i] + p[1]; \
    if (i == N-1) { \
    p[0] += 6; \
    p[1] += 9; \
    } \
    } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
  }
#endif

  //
  // Test: collapse clause on omp target parallel for.
  //
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES collapse(2)
#include "tpf_defines.h"
  // 1024 x 3 == N, so the collapsed nest touches exactly the same elements
  // as the flat loops above.
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR2( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR3( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR4( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR5( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR6( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR7( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR8( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR9( S[0] = 0; \
    for (int i = 0; i < N; i++) { \
    A[i] = B[i] = 0; \
    } , for (int i = 0; i < 1024; i++) { \
    for (int j = 0; j < 3; j++) { \
    A[i*3+j] += C[i*3+j] + D[i*3+j]; \
    B[i*3+j] += D[i*3+j] + E[i*3+j]; \
    } \
    } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (N/2*(N+1))))
  }
  DUMP_SUCCESS9()

  //
  // Test: ordered clause on omp target parallel for.
  //
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES ordered
#include "tpf_defines.h"
  // The ordered region serializes the accumulation into S[0], so no
  // reduction clause is needed for a deterministic sum.
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9( S[0] = 0; \
    , for (int i = 0; i < N; i++) { \
    _Pragma("omp ordered") \
    S[0] += C[i] + D[i]; \
    } , { }, VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  //
  // Test: Ensure coalesced scheduling on GPU.
  //
  // With 32 threads over 96 iterations, coalesced (round-robin) scheduling
  // gives thread (i % 32) iteration i, so sum(i - thread_num) over the three
  // stripes is 0*32 + 32*32 + 64*32 — the value VERIFY expects.
  if (cpuExec == 0) {
#undef TARGET_PARALLEL_FOR_CLAUSES
#define TARGET_PARALLEL_FOR_CLAUSES
#include "tpf_defines.h"
    int threads[1]; threads[0] = 32;
    TARGET_PARALLEL_FOR1( S[0] = 0; \
    for (int i = 0; i < 96; i++) { \
    A[i] = 0; \
    } \
    , for (int i = 0; i < 96; i++) { \
    A[i] += i - omp_get_thread_num(); \
    } , { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], (32*32 + 64*32) ))
    TARGET_PARALLEL_FOR2( S[0] = 0; \
    for (int i = 0; i < 96; i++) { \
    A[i] = 0; \
    } \
    , for (int i = 0; i < 96; i++) { \
    A[i] += i - omp_get_thread_num(); \
    } , { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], (32*32 + 64*32) ))
    TARGET_PARALLEL_FOR7( S[0] = 0; \
    for (int i = 0; i < 96; i++) { \
    A[i] = 0; \
    } \
    , for (int i = 0; i < 96; i++) { \
    A[i] += i - omp_get_thread_num(); \
    } , { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], (32*32 + 64*32) ))
  } else {
    DUMP_SUCCESS(3);
  }

  return 0;
}
convolution_3x3_pack8to1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a raw 3x3 fp32 kernel into the Winograd F(6x6,3x3) 8x8 domain and
// interleave it as fp16 for the pack8(input)-to-pack1(output) kernels below.
//
// kernel             raw fp32 weights, layout outch-inch-3x3
// kernel_tm_pack8to1 output: per 64 transform points, 8 input channels
//                    interleaved; output channels grouped by 8 plus a tail
// inch / outch       raw channel counts (inch assumed to be a multiple of 8
//                    by the pack loops below -- TODO confirm at the caller)
static void conv3x3s1_winograd64_transform_kernel_pack8to1_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix for F(6x6,3x3): maps a 3-tap row/column to 8 transform points
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (8x3)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = tmp * G^T  (8x8)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8a-inch/8a-64-outch;
    kernel_tm_pack8to1.create(8 * inch / 8, 64, outch / 8 + outch % 8, (size_t)2u * 8, 8);

    int p = 0;
    for (; p + 7 < outch; p += 8)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);
        const Mat k4 = kernel_tm.channel(p + 4);
        const Mat k5 = kernel_tm.channel(p + 5);
        const Mat k6 = kernel_tm.channel(p + 6);
        const Mat k7 = kernel_tm.channel(p + 7);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    // 8 consecutive output channels for one input channel, fp32 -> fp16
                    g00[0] = (__fp16)k0.row(q + i)[k];
                    g00[1] = (__fp16)k1.row(q + i)[k];
                    g00[2] = (__fp16)k2.row(q + i)[k];
                    g00[3] = (__fp16)k3.row(q + i)[k];
                    g00[4] = (__fp16)k4.row(q + i)[k];
                    g00[5] = (__fp16)k5.row(q + i)[k];
                    g00[6] = (__fp16)k6.row(q + i)[k];
                    g00[7] = (__fp16)k7.row(q + i)[k];

                    g00 += 8;
                }
            }
        }
    }
    for (; p < outch; p++)
    {
        // tail output channels: one dst channel each, 8 fp16 per inch block
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];

                    g00 += 1;
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) 3x3 stride-1 convolution, fp16 arithmetic, pack8 input
// to pack1 output.
//
// bottom_blob  input feature map, elempack 8 (c counts 8-channel blocks)
// top_blob     output feature map, elempack 1, pre-allocated by the caller
// kernel_tm    weights pre-packed by the transform function above
// _bias        per-output-channel fp16 bias (may be empty)
// opt          thread count and workspace allocator
//
// Pipeline: pad input to 6n+2 -> transform 8x8 input tiles -> per transform
// point GEMM (dot) -> inverse-transform 6x6 output tiles -> crop padding.
static void conv3x3s1_winograd64_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c; // number of pack8 input-channel blocks
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm / 8 * h_tm / 8;

        //         bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);

        //         const float itm[8][8] = {
        //             {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //             {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //             {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //             {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //             {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //             {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //             {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //             {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        //         };

        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            __fp16 tmp[8][8][8]; // row pass result: [point][row][pack8 lane]

            // tile
            for (int i = 0; i < h_tm / 8; i++)
            {
                for (int j = 0; j < w_tm / 8; j++)
                {
                    const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;

                    // horizontal pass: B^T applied to each of the 8 rows
                    for (int m = 0; m < 8; m++)
                    {
                        float16x8_t _r00 = vld1q_f16(r0);
                        float16x8_t _r01 = vld1q_f16(r0 + 8);
                        float16x8_t _r02 = vld1q_f16(r0 + 16);
                        float16x8_t _r03 = vld1q_f16(r0 + 24);
                        float16x8_t _r04 = vld1q_f16(r0 + 32);
                        float16x8_t _r05 = vld1q_f16(r0 + 40);
                        float16x8_t _r06 = vld1q_f16(r0 + 48);
                        float16x8_t _r07 = vld1q_f16(r0 + 56);

                        float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
                        float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);
                        vst1q_f16(tmp[0][m], _tmp0m);
                        vst1q_f16(tmp[7][m], _tmp7m);

                        //                         tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
                        //                         tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;

                        float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
                        float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);

                        //                         float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
                        //                         float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);

                        float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
                        float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);
                        vst1q_f16(tmp[1][m], _tmp1m);
                        vst1q_f16(tmp[2][m], _tmp2m);

                        //                         tmp[1][m] = tmp12a + tmp12b;
                        //                         tmp[2][m] = tmp12a - tmp12b;

                        float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
                        float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                        //                         float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
                        //                         float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);

                        float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
                        float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);
                        vst1q_f16(tmp[3][m], _tmp3m);
                        vst1q_f16(tmp[4][m], _tmp4m);

                        //                         tmp[3][m] = tmp34a + tmp34b;
                        //                         tmp[4][m] = tmp34a - tmp34b;

                        float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
                        float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                        //                         float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
                        //                         float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);

                        float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
                        float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);
                        vst1q_f16(tmp[5][m], _tmp5m);
                        vst1q_f16(tmp[6][m], _tmp6m);

                        //                         tmp[5][m] = tmp56a + tmp56b;
                        //                         tmp[6][m] = tmp56a - tmp56b;

                        r0 += w * 8;
                    }

                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
                    __fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
                    __fp16* r0_tm_7 = r0_tm_0 + tiles * 56;

                    // vertical pass: same transform applied down the columns
                    for (int m = 0; m < 8; m++)
                    {
                        float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                        float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                        float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                        float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                        float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                        float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                        float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                        float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                        float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
                        float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);

                        //                         r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
                        //                         r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;

                        float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
                        float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);

                        //                         float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
                        //                         float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);

                        float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
                        float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);

                        //                         r0_tm[1] = tmp12a + tmp12b;
                        //                         r0_tm[2] = tmp12a - tmp12b;

                        float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                        float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                        //                         float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
                        //                         float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);

                        float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
                        float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);

                        //                         r0_tm[3] = tmp34a + tmp34b;
                        //                         r0_tm[4] = tmp34a - tmp34b;

                        float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
                        float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                        //                         float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
                        //                         float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);

                        float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
                        float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);

                        //                         r0_tm[5] = tmp56a + tmp56b;
                        //                         r0_tm[6] = tmp56a - tmp56b;

                        vst1q_f16(r0_tm_0, _r0tm0);
                        vst1q_f16(r0_tm_1, _r0tm1);
                        vst1q_f16(r0_tm_2, _r0tm2);
                        vst1q_f16(r0_tm_3, _r0tm3);
                        vst1q_f16(r0_tm_4, _r0tm4);
                        vst1q_f16(r0_tm_5, _r0tm5);
                        vst1q_f16(r0_tm_6, _r0tm6);
                        vst1q_f16(r0_tm_7, _r0tm7);

                        r0_tm_0 += tiles * 64;
                        r0_tm_1 += tiles * 64;
                        r0_tm_2 += tiles * 64;
                        r0_tm_3 += tiles * 64;
                        r0_tm_4 += tiles * 64;
                        r0_tm_5 += tiles * 64;
                        r0_tm_6 += tiles * 64;
                        r0_tm_7 += tiles * 64;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // permute tiles into contiguous groups of 8 / 4 / 1 for the GEMM
        //         bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x8
                    asm volatile(
                        "prfm   pldl1keep, [%0, #512]       \n"
                        "ld4    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                        "ld4    {v4.8h, v5.8h, v6.8h, v7.8h}, [%0]      \n"
                        "sub    %0, %0, #64                 \n"

                        "uzp1   v16.8h, v0.8h, v4.8h        \n"
                        "uzp2   v20.8h, v0.8h, v4.8h        \n"
                        "uzp1   v17.8h, v1.8h, v5.8h        \n"
                        "uzp2   v21.8h, v1.8h, v5.8h        \n"
                        "uzp1   v18.8h, v2.8h, v6.8h        \n"
                        "uzp2   v22.8h, v2.8h, v6.8h        \n"
                        "uzp1   v19.8h, v3.8h, v7.8h        \n"
                        "uzp2   v23.8h, v3.8h, v7.8h        \n"

                        "st1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        "st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x4
                    asm volatile(
                        "prfm   pldl1keep, [%0, #256]       \n"
                        "ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                        "st4    {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    asm volatile(
                        "prfm   pldl1keep, [%0, #128]       \n"
                        "ld1    {v0.8h}, [%0]               \n"
                        "st1    {v0.8h}, [%1], #16          \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 64, outch, 2u, 1, opt.workspace_allocator);

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 3;

        // 8 output channels at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 8;

            __fp16* output0_tm = top_blob_tm.channel(p);
            __fp16* output1_tm = top_blob_tm.channel(p + 1);
            __fp16* output2_tm = top_blob_tm.channel(p + 2);
            __fp16* output3_tm = top_blob_tm.channel(p + 3);
            __fp16* output4_tm = top_blob_tm.channel(p + 4);
            __fp16* output5_tm = top_blob_tm.channel(p + 5);
            __fp16* output6_tm = top_blob_tm.channel(p + 6);
            __fp16* output7_tm = top_blob_tm.channel(p + 7);

            const Mat kernel01_tm = kernel_tm.channel(p / 8);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);

                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // v24..v31 accumulate 8 tiles for output channels 0..7
                    asm volatile(
                        "eor    v24.16b, v24.16b, v24.16b   \n"
                        "eor    v25.16b, v25.16b, v25.16b   \n"
                        "eor    v26.16b, v26.16b, v26.16b   \n"
                        "eor    v27.16b, v27.16b, v27.16b   \n"
                        "eor    v28.16b, v28.16b, v28.16b   \n"
                        "eor    v29.16b, v29.16b, v29.16b   \n"
                        "eor    v30.16b, v30.16b, v30.16b   \n"
                        "eor    v31.16b, v31.16b, v31.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%9, #512]       \n"
                        "ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n"

                        "prfm   pldl1keep, [%10, #512]      \n"
                        "ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"

                        "fmla   v24.8h, v16.8h, v0.h[0]     \n"
                        "fmla   v25.8h, v16.8h, v0.h[1]     \n"
                        "fmla   v26.8h, v16.8h, v0.h[2]     \n"
                        "fmla   v27.8h, v16.8h, v0.h[3]     \n"
                        "fmla   v28.8h, v16.8h, v0.h[4]     \n"
                        "fmla   v29.8h, v16.8h, v0.h[5]     \n"
                        "fmla   v30.8h, v16.8h, v0.h[6]     \n"
                        "fmla   v31.8h, v16.8h, v0.h[7]     \n"

                        "fmla   v24.8h, v17.8h, v1.h[0]     \n"
                        "fmla   v25.8h, v17.8h, v1.h[1]     \n"
                        "fmla   v26.8h, v17.8h, v1.h[2]     \n"
                        "fmla   v27.8h, v17.8h, v1.h[3]     \n"
                        "fmla   v28.8h, v17.8h, v1.h[4]     \n"
                        "fmla   v29.8h, v17.8h, v1.h[5]     \n"
                        "fmla   v30.8h, v17.8h, v1.h[6]     \n"
                        "fmla   v31.8h, v17.8h, v1.h[7]     \n"

                        "prfm   pldl1keep, [%9, #512]       \n"
                        "ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n"

                        "fmla   v24.8h, v18.8h, v2.h[0]     \n"
                        "fmla   v25.8h, v18.8h, v2.h[1]     \n"
                        "fmla   v26.8h, v18.8h, v2.h[2]     \n"
                        "fmla   v27.8h, v18.8h, v2.h[3]     \n"
                        "fmla   v28.8h, v18.8h, v2.h[4]     \n"
                        "fmla   v29.8h, v18.8h, v2.h[5]     \n"
                        "fmla   v30.8h, v18.8h, v2.h[6]     \n"
                        "fmla   v31.8h, v18.8h, v2.h[7]     \n"

                        "prfm   pldl1keep, [%10, #512]      \n"
                        "ld1    {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"

                        "fmla   v24.8h, v19.8h, v3.h[0]     \n"
                        "fmla   v25.8h, v19.8h, v3.h[1]     \n"
                        "fmla   v26.8h, v19.8h, v3.h[2]     \n"
                        "fmla   v27.8h, v19.8h, v3.h[3]     \n"
                        "fmla   v28.8h, v19.8h, v3.h[4]     \n"
                        "fmla   v29.8h, v19.8h, v3.h[5]     \n"
                        "fmla   v30.8h, v19.8h, v3.h[6]     \n"
                        "fmla   v31.8h, v19.8h, v3.h[7]     \n"

                        "fmla   v24.8h, v20.8h, v4.h[0]     \n"
                        "fmla   v25.8h, v20.8h, v4.h[1]     \n"
                        "fmla   v26.8h, v20.8h, v4.h[2]     \n"
                        "fmla   v27.8h, v20.8h, v4.h[3]     \n"
                        "fmla   v28.8h, v20.8h, v4.h[4]     \n"
                        "fmla   v29.8h, v20.8h, v4.h[5]     \n"
                        "fmla   v30.8h, v20.8h, v4.h[6]     \n"
                        "fmla   v31.8h, v20.8h, v4.h[7]     \n"

                        "fmla   v24.8h, v21.8h, v5.h[0]     \n"
                        "fmla   v25.8h, v21.8h, v5.h[1]     \n"
                        "fmla   v26.8h, v21.8h, v5.h[2]     \n"
                        "fmla   v27.8h, v21.8h, v5.h[3]     \n"
                        "fmla   v28.8h, v21.8h, v5.h[4]     \n"
                        "fmla   v29.8h, v21.8h, v5.h[5]     \n"
                        "fmla   v30.8h, v21.8h, v5.h[6]     \n"
                        "fmla   v31.8h, v21.8h, v5.h[7]     \n"

                        "fmla   v24.8h, v22.8h, v6.h[0]     \n"
                        "fmla   v25.8h, v22.8h, v6.h[1]     \n"
                        "fmla   v26.8h, v22.8h, v6.h[2]     \n"
                        "fmla   v27.8h, v22.8h, v6.h[3]     \n"
                        "fmla   v28.8h, v22.8h, v6.h[4]     \n"
                        "fmla   v29.8h, v22.8h, v6.h[5]     \n"
                        "fmla   v30.8h, v22.8h, v6.h[6]     \n"
                        "fmla   v31.8h, v22.8h, v6.h[7]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v24.8h, v23.8h, v7.h[0]     \n"
                        "fmla   v25.8h, v23.8h, v7.h[1]     \n"
                        "fmla   v26.8h, v23.8h, v7.h[2]     \n"
                        "fmla   v27.8h, v23.8h, v7.h[3]     \n"
                        "fmla   v28.8h, v23.8h, v7.h[4]     \n"
                        "fmla   v29.8h, v23.8h, v7.h[5]     \n"
                        "fmla   v30.8h, v23.8h, v7.h[6]     \n"
                        "fmla   v31.8h, v23.8h, v7.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v24.8h}, [%1], #16         \n"
                        "st1    {v25.8h}, [%2], #16         \n"
                        "st1    {v26.8h}, [%3], #16         \n"
                        "st1    {v27.8h}, [%4], #16         \n"
                        "st1    {v28.8h}, [%5], #16         \n"
                        "st1    {v29.8h}, [%6], #16         \n"
                        "st1    {v30.8h}, [%7], #16         \n"
                        "st1    {v31.8h}, [%8], #16         \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);

                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // same as above but only 4 tiles wide (4h accumulators)
                    asm volatile(
                        "eor    v24.16b, v24.16b, v24.16b   \n"
                        "eor    v25.16b, v25.16b, v25.16b   \n"
                        "eor    v26.16b, v26.16b, v26.16b   \n"
                        "eor    v27.16b, v27.16b, v27.16b   \n"
                        "eor    v28.16b, v28.16b, v28.16b   \n"
                        "eor    v29.16b, v29.16b, v29.16b   \n"
                        "eor    v30.16b, v30.16b, v30.16b   \n"
                        "eor    v31.16b, v31.16b, v31.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%9, #256]       \n"
                        "ld1    {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n"

                        "prfm   pldl1keep, [%10, #512]      \n"
                        "ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"

                        "fmla   v24.4h, v16.4h, v0.h[0]     \n"
                        "fmla   v25.4h, v16.4h, v0.h[1]     \n"
                        "fmla   v26.4h, v16.4h, v0.h[2]     \n"
                        "fmla   v27.4h, v16.4h, v0.h[3]     \n"
                        "fmla   v28.4h, v16.4h, v0.h[4]     \n"
                        "fmla   v29.4h, v16.4h, v0.h[5]     \n"
                        "fmla   v30.4h, v16.4h, v0.h[6]     \n"
                        "fmla   v31.4h, v16.4h, v0.h[7]     \n"

                        "fmla   v24.4h, v17.4h, v1.h[0]     \n"
                        "fmla   v25.4h, v17.4h, v1.h[1]     \n"
                        "fmla   v26.4h, v17.4h, v1.h[2]     \n"
                        "fmla   v27.4h, v17.4h, v1.h[3]     \n"
                        "fmla   v28.4h, v17.4h, v1.h[4]     \n"
                        "fmla   v29.4h, v17.4h, v1.h[5]     \n"
                        "fmla   v30.4h, v17.4h, v1.h[6]     \n"
                        "fmla   v31.4h, v17.4h, v1.h[7]     \n"

                        "prfm   pldl1keep, [%9, #256]       \n"
                        "ld1    {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n"

                        "fmla   v24.4h, v18.4h, v2.h[0]     \n"
                        "fmla   v25.4h, v18.4h, v2.h[1]     \n"
                        "fmla   v26.4h, v18.4h, v2.h[2]     \n"
                        "fmla   v27.4h, v18.4h, v2.h[3]     \n"
                        "fmla   v28.4h, v18.4h, v2.h[4]     \n"
                        "fmla   v29.4h, v18.4h, v2.h[5]     \n"
                        "fmla   v30.4h, v18.4h, v2.h[6]     \n"
                        "fmla   v31.4h, v18.4h, v2.h[7]     \n"

                        "prfm   pldl1keep, [%10, #512]      \n"
                        "ld1    {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"

                        "fmla   v24.4h, v19.4h, v3.h[0]     \n"
                        "fmla   v25.4h, v19.4h, v3.h[1]     \n"
                        "fmla   v26.4h, v19.4h, v3.h[2]     \n"
                        "fmla   v27.4h, v19.4h, v3.h[3]     \n"
                        "fmla   v28.4h, v19.4h, v3.h[4]     \n"
                        "fmla   v29.4h, v19.4h, v3.h[5]     \n"
                        "fmla   v30.4h, v19.4h, v3.h[6]     \n"
                        "fmla   v31.4h, v19.4h, v3.h[7]     \n"

                        "fmla   v24.4h, v20.4h, v4.h[0]     \n"
                        "fmla   v25.4h, v20.4h, v4.h[1]     \n"
                        "fmla   v26.4h, v20.4h, v4.h[2]     \n"
                        "fmla   v27.4h, v20.4h, v4.h[3]     \n"
                        "fmla   v28.4h, v20.4h, v4.h[4]     \n"
                        "fmla   v29.4h, v20.4h, v4.h[5]     \n"
                        "fmla   v30.4h, v20.4h, v4.h[6]     \n"
                        "fmla   v31.4h, v20.4h, v4.h[7]     \n"

                        "fmla   v24.4h, v21.4h, v5.h[0]     \n"
                        "fmla   v25.4h, v21.4h, v5.h[1]     \n"
                        "fmla   v26.4h, v21.4h, v5.h[2]     \n"
                        "fmla   v27.4h, v21.4h, v5.h[3]     \n"
                        "fmla   v28.4h, v21.4h, v5.h[4]     \n"
                        "fmla   v29.4h, v21.4h, v5.h[5]     \n"
                        "fmla   v30.4h, v21.4h, v5.h[6]     \n"
                        "fmla   v31.4h, v21.4h, v5.h[7]     \n"

                        "fmla   v24.4h, v22.4h, v6.h[0]     \n"
                        "fmla   v25.4h, v22.4h, v6.h[1]     \n"
                        "fmla   v26.4h, v22.4h, v6.h[2]     \n"
                        "fmla   v27.4h, v22.4h, v6.h[3]     \n"
                        "fmla   v28.4h, v22.4h, v6.h[4]     \n"
                        "fmla   v29.4h, v22.4h, v6.h[5]     \n"
                        "fmla   v30.4h, v22.4h, v6.h[6]     \n"
                        "fmla   v31.4h, v22.4h, v6.h[7]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v24.4h, v23.4h, v7.h[0]     \n"
                        "fmla   v25.4h, v23.4h, v7.h[1]     \n"
                        "fmla   v26.4h, v23.4h, v7.h[2]     \n"
                        "fmla   v27.4h, v23.4h, v7.h[3]     \n"
                        "fmla   v28.4h, v23.4h, v7.h[4]     \n"
                        "fmla   v29.4h, v23.4h, v7.h[5]     \n"
                        "fmla   v30.4h, v23.4h, v7.h[6]     \n"
                        "fmla   v31.4h, v23.4h, v7.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v24.4h}, [%1], #8          \n"
                        "st1    {v25.4h}, [%2], #8          \n"
                        "st1    {v26.4h}, [%3], #8          \n"
                        "st1    {v27.4h}, [%4], #8          \n"
                        "st1    {v28.4h}, [%5], #8          \n"
                        "st1    {v29.4h}, [%6], #8          \n"
                        "st1    {v30.4h}, [%7], #8          \n"
                        "st1    {v31.4h}, [%8], #8          \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);

                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // single tile: v30 holds the 8 output channels
                    asm volatile(
                        "eor    v30.16b, v30.16b, v30.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%9, #128]       \n"
                        "ld1    {v0.8h}, [%9], #16          \n"

                        "prfm   pldl1keep, [%10, #512]      \n"
                        "ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n"

                        "fmla   v30.8h, v16.8h, v0.h[0]     \n"
                        "fmla   v30.8h, v17.8h, v0.h[1]     \n"

                        "prfm   pldl1keep, [%10, #512]      \n"
                        "ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n"

                        "fmla   v30.8h, v18.8h, v0.h[2]     \n"
                        "fmla   v30.8h, v19.8h, v0.h[3]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v30.8h, v20.8h, v0.h[4]     \n"
                        "fmla   v30.8h, v21.8h, v0.h[5]     \n"
                        "fmla   v30.8h, v22.8h, v0.h[6]     \n"
                        "fmla   v30.8h, v23.8h, v0.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v30.h}[0], [%1], #2        \n"
                        "st1    {v30.h}[1], [%2], #2        \n"
                        "st1    {v30.h}[2], [%3], #2        \n"
                        "st1    {v30.h}[3], [%4], #2        \n"
                        "st1    {v30.h}[4], [%5], #2        \n"
                        "st1    {v30.h}[5], [%6], #2        \n"
                        "st1    {v30.h}[6], [%7], #2        \n"
                        "st1    {v30.h}[7], [%8], #2        \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
            }
        }

        remain_outch_start += nn_outch << 3;

        // remaining output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);

                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // v30 accumulates 8 tiles for this one output channel
                    asm volatile(
                        "eor    v30.16b, v30.16b, v30.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n"

                        "prfm   pldl1keep, [%3, #128]       \n"
                        "ld1    {v0.8h}, [%3], #16          \n"

                        "fmla   v30.8h, v16.8h, v0.h[0]     \n"
                        "fmla   v30.8h, v17.8h, v0.h[1]     \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n"

                        "fmla   v30.8h, v18.8h, v0.h[2]     \n"
                        "fmla   v30.8h, v19.8h, v0.h[3]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v30.8h, v20.8h, v0.h[4]     \n"
                        "fmla   v30.8h, v21.8h, v0.h[5]     \n"
                        "fmla   v30.8h, v22.8h, v0.h[6]     \n"
                        "fmla   v30.8h, v23.8h, v0.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v30.8h}, [%1], #16         \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(kptr)        // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);

                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    asm volatile(
                        "eor    v30.16b, v30.16b, v30.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%2, #256]       \n"
                        "ld1    {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"

                        "prfm   pldl1keep, [%3, #128]       \n"
                        "ld1    {v0.8h}, [%3], #16          \n"

                        "fmla   v30.4h, v16.4h, v0.h[0]     \n"
                        "fmla   v30.4h, v17.4h, v0.h[1]     \n"

                        "prfm   pldl1keep, [%2, #256]       \n"
                        "ld1    {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"

                        "fmla   v30.4h, v18.4h, v0.h[2]     \n"
                        "fmla   v30.4h, v19.4h, v0.h[3]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v30.4h, v20.4h, v0.h[4]     \n"
                        "fmla   v30.4h, v21.4h, v0.h[5]     \n"
                        "fmla   v30.4h, v22.4h, v0.h[6]     \n"
                        "fmla   v30.4h, v23.4h, v0.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v30.4h}, [%1], #8          \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(kptr)        // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);

                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);

                    float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f);

                    for (int q = 0; q < inch; q++)
                    {
                        // one pack8 block (8 fp16 lanes) per inch step
                        float16x8_t _r0 = vld1q_f16(r0);
                        float16x8_t _k0 = vld1q_f16(kptr);
                        _sum0 = vfmaq_f16(_sum0, _r0, _k0);

                        // BUGFIX: advance a full pack8 block of 8 halves per step.
                        // The previous += 4 re-read overlapping lanes and consumed
                        // only half of each row, matching neither the ld1 {v0.8h}
                        // asm paths nor the 8-halves-per-block packing above.
                        kptr += 8;
                        r0 += 8;
                    }

                    // horizontal reduce the 8 fp16 partial sums in fp32
                    __fp16 sum0 = vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0))));

                    output0_tm[0] = sum0;
                    output0_tm++;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator);
    }
    {
        //         const float otm[6][8] = {
        //             {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
        //             {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
        //             {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
        //         };

        // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
        // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
        // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
        // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
        // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
        // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = w_tm / 8 * h_tm / 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            const __fp16 bias0 = bias ? bias[p] : 0.f;
            //             float32x2_t _bias0 = vdup_n_f32(bias0);

            __fp16 tmp[6][8]; // row-pass result of the inverse transform

            // tile
            for (int i = 0; i < outh / 6; i++)
            {
                for (int j = 0; j < outw / 6; j++)
                {
                    //                     top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator);

                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 1;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * 1;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * 2;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * 3;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * 4;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * 5;
                    const __fp16* output0_tm_6 = output0_tm_0 + tiles * 6;
                    const __fp16* output0_tm_7 = output0_tm_0 + tiles * 7;

                    // TODO neon optimize
                    for (int m = 0; m < 8; m++)
                    {
                        __fp16 tmp024a = output0_tm_1[0] + output0_tm_2[0];
                        __fp16 tmp135a = output0_tm_1[0] - output0_tm_2[0];

                        __fp16 tmp024b = output0_tm_3[0] + output0_tm_4[0];
                        __fp16 tmp135b = output0_tm_3[0] - output0_tm_4[0];

                        __fp16 tmp024c = output0_tm_5[0] + output0_tm_6[0];
                        __fp16 tmp135c = output0_tm_5[0] - output0_tm_6[0];

                        tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
                        tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
                        tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                        tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
                        tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;

                        output0_tm_0 += tiles * 8;
                        output0_tm_1 += tiles * 8;
                        output0_tm_2 += tiles * 8;
                        output0_tm_3 += tiles * 8;
                        output0_tm_4 += tiles * 8;
                        output0_tm_5 += tiles * 8;
                        output0_tm_6 += tiles * 8;
                        output0_tm_7 += tiles * 8;
                    }

                    __fp16* output0 = out0.row<__fp16>(i * 6) + j * 6;

                    for (int m = 0; m < 6; m++)
                    {
                        const __fp16* tmp0 = tmp[m];

                        __fp16 tmp024a = tmp0[1] + tmp0[2];
                        __fp16 tmp135a = tmp0[1] - tmp0[2];

                        __fp16 tmp024b = tmp0[3] + tmp0[4];
                        __fp16 tmp135b = tmp0[3] - tmp0[4];

                        __fp16 tmp024c = tmp0[5] + tmp0[6];
                        __fp16 tmp135c = tmp0[5] - tmp0[6];

                        output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                        output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                        output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                        output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                        output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0 += outw;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the OpenMPIRBuilder class and helpers used as a convenient // way to create LLVM instructions for OpenMP directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Allocator.h" #include <forward_list> namespace llvm { class CanonicalLoopInfo; /// An interface to create LLVM-IR for OpenMP directives. /// /// Each OpenMP directive has a corresponding public generator method. class OpenMPIRBuilder { public: /// Create a new OpenMPIRBuilder operating on the given module \p M. This will /// not have an effect on \p M (see initialize). OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {} ~OpenMPIRBuilder(); /// Initialize the internal state, this will put structures types and /// potentially other helpers into the underlying module. Must be called /// before any other method and only once! void initialize(); /// Finalize the underlying module, e.g., by outlining regions. /// \param Fn The function to be finalized. If not used, /// all functions are finalized. /// \param AllowExtractorSinking Flag to include sinking instructions, /// emitted by CodeExtractor, in the /// outlined region. Default is false. void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false); /// Add attributes known for \p FnID to \p Fn. 
void addAttributes(omp::RuntimeFunction FnID, Function &Fn); /// Type used throughout for insertion points. using InsertPointTy = IRBuilder<>::InsertPoint; /// Callback type for variable finalization (think destructors). /// /// \param CodeGenIP is the insertion point at which the finalization code /// should be placed. /// /// A finalize callback knows about all objects that need finalization, e.g. /// destruction, when the scope of the currently generated construct is left /// at the time, and location, the callback is invoked. using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>; struct FinalizationInfo { /// The finalization callback provided by the last in-flight invocation of /// createXXXX for the directive of kind DK. FinalizeCallbackTy FiniCB; /// The directive kind of the innermost directive that has an associated /// region which might require finalization when it is left. omp::Directive DK; /// Flag to indicate if the directive is cancellable. bool IsCancellable; }; /// Push a finalization callback on the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void pushFinalizationCB(const FinalizationInfo &FI) { FinalizationStack.push_back(FI); } /// Pop the last finalization callback from the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void popFinalizationCB() { FinalizationStack.pop_back(); } /// Callback type for body (=inner region) code generation /// /// The callback takes code locations as arguments, each describing a /// location at which code might need to be generated or a location that is /// the target of control transfer. /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the body code should be /// placed. /// \param ContinuationBB is the basic block target to leave the body. /// /// Note that all blocks pointed to by the arguments have terminators. 
using BodyGenCallbackTy = function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; // This is created primarily for sections construct as llvm::function_ref // (BodyGenCallbackTy) is not storable (as described in the comments of // function_ref class - function_ref contains non-ownable reference // to the callable. using StorableBodyGenCallbackTy = std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; /// Callback type for loop body code generation. /// /// \param CodeGenIP is the insertion point where the loop's body code must be /// placed. This will be a dedicated BasicBlock with a /// conditional branch from the loop condition check and /// terminated with an unconditional branch to the loop /// latch. /// \param IndVar is the induction variable usable at the insertion point. using LoopBodyGenCallbackTy = function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>; /// Callback type for variable privatization (think copy & default /// constructor). /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the privatization code /// should be placed. /// \param Original The value being copied/created, should not be used in the /// generated IR. /// \param Inner The equivalent of \p Original that should be used in the /// generated IR; this is equal to \p Original if the value is /// a pointer and can thus be passed directly, otherwise it is /// an equivalent but different value. /// \param ReplVal The replacement value, thus a copy or new created version /// of \p Inner. /// /// \returns The new insertion point where code generation continues and /// \p ReplVal the replacement value. 
using PrivatizeCallbackTy = function_ref<InsertPointTy(
      InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
      Value &Inner, Value *&ReplVal)>;

  /// Description of a LLVM-IR insertion point (IP) and a debug/source location
  /// (filename, line, column, ...).
  struct LocationDescription {
    template <typename T, typename U>
    LocationDescription(const IRBuilder<T, U> &IRB)
        : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
    LocationDescription(const InsertPointTy &IP) : IP(IP) {}
    LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
        : IP(IP), DL(DL) {}
    InsertPointTy IP;
    DebugLoc DL;
  };

  /// Emitter methods for OpenMP directives.
  ///
  ///{

  /// Generator for '#omp barrier'
  ///
  /// \param Loc The location where the barrier directive was encountered.
  /// \param DK The kind of directive that caused the barrier.
  /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
  /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
  ///                        should be checked and acted upon.
  ///
  /// \returns The insertion point after the barrier.
  InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
                              bool ForceSimpleCall = false,
                              bool CheckCancelFlag = true);

  /// Generator for '#omp cancel'
  ///
  /// \param Loc The location where the directive was encountered.
  /// \param IfCondition The evaluated 'if' clause expression, if any.
  /// \param CanceledDirective The kind of directive that is canceled.
  ///
  /// \returns The insertion point after the barrier.
  InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
                             omp::Directive CanceledDirective);

  /// Generator for '#omp parallel'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion points to be used for alloca instructions.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param NumThreads The evaluated 'num_threads' clause expression, if any. /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind). /// \param IsCancellable Flag to indicate a cancellable parallel region. /// /// \returns The insertion position *after* the parallel. IRBuilder<>::InsertPoint createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable); /// Generator for the control flow structure of an OpenMP canonical loop. /// /// This generator operates on the logical iteration space of the loop, i.e. /// the caller only has to provide a loop trip count of the loop as defined by /// base language semantics. The trip count is interpreted as an unsigned /// integer. The induction variable passed to \p BodyGenCB will be of the same /// type and run from 0 to \p TripCount - 1. It is up to the callback to /// convert the logical iteration variable to the loop counter variable in the /// loop body. /// /// \param Loc The insert and source location description. The insert /// location can be between two instructions or the end of a /// degenerate block (e.g. a BB under construction). /// \param BodyGenCB Callback that will generate the loop body code. /// \param TripCount Number of iterations the loop body is executed. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); /// Generator for the control flow structure of an OpenMP canonical loop. 
/// /// Instead of a logical iteration space, this allows specifying user-defined /// loop counter values using increment, upper- and lower bounds. To /// disambiguate the terminology when counting downwards, instead of lower /// bounds we use \p Start for the loop counter value in the first body /// iteration. /// /// Consider the following limitations: /// /// * A loop counter space over all integer values of its bit-width cannot be /// represented. E.g using uint8_t, its loop trip count of 256 cannot be /// stored into an 8 bit integer): /// /// DO I = 0, 255, 1 /// /// * Unsigned wrapping is only supported when wrapping only "once"; E.g. /// effectively counting downwards: /// /// for (uint8_t i = 100u; i > 0; i += 127u) /// /// /// TODO: May need to add additional parameters to represent: /// /// * Allow representing downcounting with unsigned integers. /// /// * Sign of the step and the comparison operator might disagree: /// /// for (int i = 0; i < 42; i -= 1u) /// // /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the loop body code. /// \param Start Value of the loop counter for the first iterations. /// \param Stop Loop counter values past this will stop the loop. /// \param Step Loop counter increment after each iteration; negative /// means counting down. /// \param IsSigned Whether Start, Stop and Step are signed integers. /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop /// counter. /// \param ComputeIP Insertion point for instructions computing the trip /// count. Can be used to ensure the trip count is available /// at the outermost loop of a loop nest. If not set, /// defaults to the preheader of the generated loop. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. 
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP = {}, const Twine &Name = "loop"); /// Collapse a loop nest into a single loop. /// /// Merges loops of a loop nest into a single CanonicalLoopNest representation /// that has the same number of innermost loop iterations as the origin loop /// nest. The induction variables of the input loops are derived from the /// collapsed loop's induction variable. This is intended to be used to /// implement OpenMP's collapse clause. Before applying a directive, /// collapseLoops normalizes a loop nest to contain only a single loop and the /// directive's implementation does not need to handle multiple loops itself. /// This does not remove the need to handle all loop nest handling by /// directives, such as the ordered(<n>) clause or the simd schedule-clause /// modifier of the worksharing-loop directive. /// /// Example: /// \code /// for (int i = 0; i < 7; ++i) // Canonical loop "i" /// for (int j = 0; j < 9; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After collapsing with Loops={i,j}, the loop is changed to /// \code /// for (int ij = 0; ij < 63; ++ij) { /// int i = ij / 9; /// int j = ij % 9; /// body(i, j); /// } /// \endcode /// /// In the current implementation, the following limitations apply: /// /// * All input loops have an induction variable of the same type. /// /// * The collapsed loop will have the same trip count integer type as the /// input loops. Therefore it is possible that the collapsed loop cannot /// represent all iterations of the input loops. For instance, assuming a /// 32 bit integer type, and two input loops both iterating 2^16 times, the /// theoretical trip count of the collapsed loop would be 2^32 iteration, /// which cannot be represented in an 32-bit integer. Behavior is undefined /// in this case. 
/// /// * The trip counts of every input loop must be available at \p ComputeIP. /// Non-rectangular loops are not yet supported. /// /// * At each nest level, code between a surrounding loop and its nested loop /// is hoisted into the loop body, and such code will be executed more /// often than before collapsing (or not at all if any inner loop iteration /// has a trip count of 0). This is permitted by the OpenMP specification. /// /// \param DL Debug location for instructions added for collapsing, /// such as instructions to compute/derive the input loop's /// induction variables. /// \param Loops Loops in the loop nest to collapse. Loops are specified /// from outermost-to-innermost and every control flow of a /// loop's body must pass through its directly nested loop. /// \param ComputeIP Where additional instruction that compute the collapsed /// trip count. If not set, defaults to before the generated /// loop. /// /// \returns The CanonicalLoopInfo object representing the collapsed loop. CanonicalLoopInfo *collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, InsertPointTy ComputeIP); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// TODO: Workshare loops with static scheduling may contain up to two loops /// that fulfill the requirements of an OpenMP canonical loop. One for /// iterating over all iterations of a chunk and another one for iterating /// over all chunks that are executed on the same thread. 
Returning
  /// CanonicalLoopInfo objects representing them may eventually be useful for
  /// the apply clause planned in OpenMP 6.0, but currently whether these are
  /// canonical loops is irrelevant.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param Chunk    The size of loop chunk considered as a unit when
  ///                 scheduling. If \p nullptr, defaults to 1.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                         InsertPointTy AllocaIP,
                                         bool NeedsBarrier,
                                         Value *Chunk = nullptr);

  /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain, and then in each iteration
  /// to update the loop counter.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param SchedType Type of scheduling to be passed to the init function.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param Chunk    The size of loop chunk considered as a unit when
  ///                 scheduling. If \p nullptr, defaults to 1.
  ///
  /// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          omp::OMPScheduleType SchedType,
                                          bool NeedsBarrier,
                                          Value *Chunk = nullptr);

  /// Modifies the canonical loop to be a workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain the loop bounds to be used in
  /// the current thread, updates the relevant instructions in the canonical
  /// loop and calls to an OpenMP runtime finalization function after the loop.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                   InsertPointTy AllocaIP, bool NeedsBarrier);

  /// Tile a loop nest.
  ///
  /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
  /// \p Loops must be perfectly nested, from outermost to innermost loop
  /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
  /// of every loop and every tile size must be usable in the outermost
  /// loop's preheader. This implies that the loop nest is rectangular.
///
  /// Example:
  /// \code
  ///   for (int i = 0; i < 15; ++i) // Canonical loop "i"
  ///     for (int j = 0; j < 14; ++j) // Canonical loop "j"
  ///       body(i, j);
  /// \endcode
  ///
  /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
  /// \code
  ///   for (int i1 = 0; i1 < 3; ++i1)
  ///     for (int j1 = 0; j1 < 2; ++j1)
  ///       for (int i2 = 0; i2 < 5; ++i2)
  ///         for (int j2 = 0; j2 < 7; ++j2)
  ///           body(i1*5+i2, j1*7+j2);
  /// \endcode
  ///
  /// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
  /// referred to as the floor, and the loops i2 and j2 are the tiles. Tiling
  /// also handles non-constant trip counts, non-constant tile sizes and trip
  /// counts that are not multiples of the tile size. In the latter case the
  /// tile loop of the last floor-loop iteration will have fewer iterations
  /// than specified as its tile size.
  ///
  ///
  /// @param DL        Debug location for instructions added by tiling, for
  ///                  instance the floor- and tile trip count computation.
  /// @param Loops     Loops to tile. The CanonicalLoopInfo objects are
  ///                  invalidated by this method, i.e. should not be used
  ///                  after tiling.
  /// @param TileSizes For each loop in \p Loops, the tile size for that
  ///                  dimension.
  ///
  /// \returns A list of generated loops. Contains twice as many loops as the
  ///          input loop nest; the first half are the floor loops and the
  ///          second half are the tile loops.
  std::vector<CanonicalLoopInfo *>
  tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
            ArrayRef<Value *> TileSizes);

  /// Generator for '#omp flush'
  ///
  /// \param Loc The location where the flush directive was encountered
  void createFlush(const LocationDescription &Loc);

  /// Generator for '#omp taskwait'
  ///
  /// \param Loc The location where the taskwait directive was encountered.
  void createTaskwait(const LocationDescription &Loc);

  /// Generator for '#omp taskyield'
  ///
  /// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc); /// Functions used to generate reductions. Such functions take two Values /// representing LHS and RHS of the reduction, respectively, and a reference /// to the value that is updated to refer to the reduction result. using ReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>; /// Functions used to generate atomic reductions. Such functions take two /// Values representing pointers to LHS and RHS of the reduction. They are /// expected to atomically update the LHS to the reduced value. using AtomicReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *)>; /// Information about an OpenMP reduction. struct ReductionInfo { ReductionInfo(Value *Variable, Value *PrivateVariable, ReductionGenTy ReductionGen, AtomicReductionGenTy AtomicReductionGen) : Variable(Variable), PrivateVariable(PrivateVariable), ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) {} /// Returns the type of the element being reduced. Type *getElementType() const { return Variable->getType()->getPointerElementType(); } /// Reduction variable of pointer type. Value *Variable; /// Thread-private partial reduction variable. Value *PrivateVariable; /// Callback for generating the reduction body. The IR produced by this will /// be used to combine two values in a thread-safe context, e.g., under /// lock or within the same thread, and therefore need not be atomic. ReductionGenTy ReductionGen; /// Callback for generating the atomic reduction body, may be null. The IR /// produced by this will be used to atomically combine two values during /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. AtomicReductionGenTy AtomicReductionGen; }; // TODO: provide atomic and non-atomic reduction generators for reduction // operators defined by the OpenMP specification. /// Generator for '#omp reduction'. 
/// /// Emits the IR instructing the runtime to perform the specific kind of /// reductions. Expects reduction variables to have been privatized and /// initialized to reduction-neutral values separately. Emits the calls to /// runtime functions as well as the reduction function and the basic blocks /// performing the reduction atomically and non-atomically. /// /// The code emitted for the following: /// /// \code /// type var_1; /// type var_2; /// #pragma omp <directive> reduction(reduction-op:var_1,var_2) /// /* body */; /// \endcode /// /// corresponds to the following sketch. /// /// \code /// void _outlined_par() { /// // N is the number of different reductions. /// void *red_array[] = {privatized_var_1, privatized_var_2, ...}; /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array, /// _omp_reduction_func, /// _gomp_critical_user.reduction.var)) { /// case 1: { /// var_1 = var_1 <reduction-op> privatized_var_1; /// var_2 = var_2 <reduction-op> privatized_var_2; /// // ... /// __kmpc_end_reduce(...); /// break; /// } /// case 2: { /// _Atomic<ReductionOp>(var_1, privatized_var_1); /// _Atomic<ReductionOp>(var_2, privatized_var_2); /// // ... /// break; /// } /// default: break; /// } /// } /// /// void _omp_reduction_func(void **lhs, void **rhs) { /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0]; /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1]; /// // ... /// } /// \endcode /// /// \param Loc The location where the reduction was /// encountered. Must be within the associate /// directive and after the last local access to the /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. /// \param Variables A list of variables in which the reduction /// results will be stored (values of pointer type). /// \param PrivateVariables A list of variables in which the partial /// reduction results are stored (values of pointer /// type). 
Coindexed with Variables. Privatization
  ///                           must be handled separately from this call.
  /// \param ReductionGen        A list of generators for non-atomic reduction
  ///                            bodies. Each takes a pair of partially reduced
  ///                            values and sets a new one.
  /// \param AtomicReductionGen  A list of generators for atomic reduction
  ///                            bodies, empty if the reduction cannot be
  ///                            performed with atomics. Each takes a pair of
  ///                            _pointers_ to partially reduced values and
  ///                            atomically stores the result into the first.
  /// \param IsNoWait            A flag set if the reduction is marked as nowait.
  InsertPointTy createReductions(const LocationDescription &Loc,
                                 InsertPointTy AllocaIP,
                                 ArrayRef<ReductionInfo> ReductionInfos,
                                 bool IsNoWait = false);

  ///}

  /// Return the insertion point used by the underlying IRBuilder.
  InsertPointTy getInsertionPoint() { return Builder.saveIP(); }

  /// Update the internal location to \p Loc.
  bool updateToLocation(const LocationDescription &Loc) {
    Builder.restoreIP(Loc.IP);
    Builder.SetCurrentDebugLocation(Loc.DL);
    return Loc.IP.getBlock() != nullptr;
  }

  /// Return the function declaration for the runtime function with \p FnID.
  FunctionCallee getOrCreateRuntimeFunction(Module &M,
                                            omp::RuntimeFunction FnID);

  Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);

  /// Return the (LLVM-IR) string describing the source location \p LocStr.
  Constant *getOrCreateSrcLocStr(StringRef LocStr);

  /// Return the (LLVM-IR) string describing the default source location.
  Constant *getOrCreateDefaultSrcLocStr();

  /// Return the (LLVM-IR) string describing the source location identified by
  /// the arguments.
  Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
                                 unsigned Line, unsigned Column);

  /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
  /// fallback if \p DL does not specify the function name.
  Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr);

  /// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);

  /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
  /// TODO: Create an enum class for the Reserve2Flags
  Value *getOrCreateIdent(Constant *SrcLocStr,
                          omp::IdentFlag Flags = omp::IdentFlag(0),
                          unsigned Reserve2Flags = 0);

  // Get the type corresponding to __kmpc_impl_lanemask_t from the deviceRTL
  Type *getLanemaskType();

  /// Generate control flow and cleanup for cancellation.
  ///
  /// \param CancelFlag Flag indicating if the cancellation is performed.
  /// \param CanceledDirective The kind of directive that is canceled.
  /// \param ExitCB Extra code to be generated in the exit block.
  void emitCancelationCheckImpl(Value *CancelFlag,
                                omp::Directive CanceledDirective,
                                FinalizeCallbackTy ExitCB = {});

  /// Generate a barrier runtime call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  /// \param DK The directive which caused the barrier
  /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
  /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
  ///                        should be checked and acted upon.
  ///
  /// \returns The insertion point after the barrier.
  InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
                                omp::Directive DK, bool ForceSimpleCall,
                                bool CheckCancelFlag);

  /// Generate a flush runtime call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  void emitFlush(const LocationDescription &Loc);

  /// The finalization stack made up of finalize callbacks currently in-flight,
  /// wrapped into FinalizationInfo objects that reference also the finalization
  /// target block and the kind of cancellable directive.
  SmallVector<FinalizationInfo, 8> FinalizationStack;

  /// Return true if the last entry in the finalization stack is of kind \p DK
  /// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) { return !FinalizationStack.empty() && FinalizationStack.back().IsCancellable && FinalizationStack.back().DK == DK; } /// Generate a taskwait runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskwaitImpl(const LocationDescription &Loc); /// Generate a taskyield runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskyieldImpl(const LocationDescription &Loc); /// Return the current thread ID. /// /// \param Ident The ident (ident_t*) describing the query origin. Value *getOrCreateThreadID(Value *Ident); /// The underlying LLVM-IR module Module &M; /// The LLVM-IR Builder used to create IR. IRBuilder<> Builder; /// Map to remember source location strings StringMap<Constant *> SrcLocStrMap; /// Map to remember existing ident_t*. DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap; /// Helper that contains information about regions we need to outline /// during finalization. struct OutlineInfo { using PostOutlineCBTy = std::function<void(Function &)>; PostOutlineCBTy PostOutlineCB; BasicBlock *EntryBB, *ExitBB; /// Collect all blocks in between EntryBB and ExitBB in both the given /// vector and set. void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet, SmallVectorImpl<BasicBlock *> &BlockVector); /// Return the function that contains the region to be outlined. Function *getFunction() const { return EntryBB->getParent(); } }; /// Collection of regions that need to be outlined during finalization. SmallVector<OutlineInfo, 16> OutlineInfos; /// Collection of owned canonical loop objects that eventually need to be /// free'd. std::forward_list<CanonicalLoopInfo> LoopInfos; /// Add a new region that will be outlined later. void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); } /// An ordered map of auto-generated variables to their unique names. 
/// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars; /// Create the global variable holding the offload mappings information. GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, std::string VarName); /// Create the global variable holding the offload names information. GlobalVariable * createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, std::string VarName); struct MapperAllocas { AllocaInst *ArgsBase = nullptr; AllocaInst *Args = nullptr; AllocaInst *ArgSizes = nullptr; }; /// Create the allocas instruction used in call to mapper functions. void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas); /// Create the call for the target mapper function. /// \param Loc The source location description. /// \param MapperFunc Function to be called. /// \param SrcLocInfo Source location information global. /// \param MaptypesArgs /// \param MapnamesArg /// \param MapperAllocas The AllocaInst used for the call. /// \param DeviceID Device ID for the call. /// \param TotalNbOperand Number of operand in the call. void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands); public: /// Generator for __kmpc_copyprivate /// /// \param Loc The source location description. /// \param BufSize Number of elements in the buffer. /// \param CpyBuf List of pointers to data to be copied. /// \param CpyFn function to call for copying data. /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise. /// /// \return The insertion position *after* the CopyPrivate call. 
  InsertPointTy createCopyPrivate(const LocationDescription &Loc,
                                  llvm::Value *BufSize, llvm::Value *CpyBuf,
                                  llvm::Value *CpyFn, llvm::Value *DidIt);

  /// Generator for '#omp single'
  ///
  /// \param Loc The source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param DidIt Local variable used as a flag to indicate 'single' thread
  ///
  /// \returns The insertion position *after* the single call.
  InsertPointTy createSingle(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB, llvm::Value *DidIt);

  /// Generator for '#omp master'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  ///
  /// \returns The insertion position *after* the master.
  InsertPointTy createMaster(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB);

  /// Generator for '#omp masked'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  ///
  /// \returns The insertion position *after* the masked region.
  InsertPointTy createMasked(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB, Value *Filter);

  /// Generator for '#omp critical'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region body code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param CriticalName name of the lock used by the critical directive
  /// \param HintInst Hint Instruction for hint clause associated with critical
  ///
  /// \returns The insertion position *after* the critical region.
InsertPointTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst); /// Generator for '#omp sections' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param SectionCBs Callbacks that will generate body of each section. /// \param PrivCB Callback to copy a given variable (think copy constructor). /// \param FiniCB Callback to finalize variable copies. /// \param IsCancellable Flag to indicate a cancellable parallel region. /// \param IsNowait If true, barrier - to ensure all sections are executed /// before moving forward will not be generated. /// \returns The insertion position *after* the sections. InsertPointTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait); /// Generator for '#omp section' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \returns The insertion position *after* the section. InsertPointTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generate conditional branch and relevant BasicBlocks through which private /// threads copy the 'copyin' variables from Master copy to threadprivate /// copies. /// /// \param IP insertion block for copyin conditional /// \param MasterVarPtr a pointer to the master variable /// \param PrivateVarPtr a pointer to the threadprivate variable /// \param IntPtrTy Pointer size type /// \param BranchtoEnd Create a branch between the copyin.not.master blocks // and copy.in.end block /// /// \returns The insertion point where copying operation to be emitted. 
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd = true); /// Create a runtime call for kmpc_Alloc /// /// \param Loc The insert and source location description. /// \param Size Size of allocated memory space /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_alloc /// /// \returns CallInst to the OMP_Alloc call CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_free /// /// \param Loc The insert and source location description. /// \param Addr Address of memory space to be freed /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_Free /// /// \returns CallInst to the OMP_Free call CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_threadprivate_cached /// /// \param Loc The insert and source location description. /// \param Pointer pointer to data to be cached /// \param Size size of data to be cached /// \param Name Name of call Instruction for callinst /// /// \returns CallInst to the thread private cache call. CallInst *createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name = Twine("")); /// The `omp target` interface /// /// For more information about the usage of this interface, /// \see openmp/libomptarget/deviceRTLs/common/include/target.h /// ///{ /// Create a runtime call for kmpc_target_init /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. 
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); /// Create a runtime call for kmpc_target_deinit /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); ///} /// Declarations for LLVM-IR types (simple, array, function and structure) are /// generated below. Their names are defined and used in OpenMPKinds.def. Here /// we provide the declarations, the initializeTypes function will provide the /// values. /// ///{ #define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr; #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ ArrayType *VarName##Ty = nullptr; \ PointerType *VarName##PtrTy = nullptr; #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \ FunctionType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #define OMP_STRUCT_TYPE(VarName, StrName, ...) \ StructType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #include "llvm/Frontend/OpenMP/OMPKinds.def" ///} private: /// Create all simple and struct types exposed by the runtime and remember /// the llvm::PointerTypes of them for easy access later. void initializeTypes(Module &M); /// Common interface for generating entry calls for OMP Directives. /// if the directive has a region/body, It will set the insertion /// point to the body /// /// \param OMPD Directive to generate entry blocks for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitBB block where the region ends. /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. 
/// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional = false); /// Common interface to finalize the region /// /// \param OMPD Directive to generate exiting code for /// \param FinIP Insertion point for emitting Finalization code and exit call /// \param ExitCall Call to the ending OMP Runtime Function /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, bool HasFinalize = true); /// Common Interface to generate OMP inlined regions /// /// \param OMPD Directive to generate inlined region for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitCall Call to the ending OMP Runtime Function /// \param BodyGenCB Body code generation callback. /// \param FiniCB Finalization Callback. Will be called when finalizing region /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// \param IsCancellable if HasFinalize is set to true, indicate if the /// the directive should be cancellable. /// \return The insertion point after the region InsertPointTy EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional = false, bool HasFinalize = true, bool IsCancellable = false); /// Get the platform-specific name separator. 
/// \param Parts different parts of the final name that needs separation /// \param FirstSeparator First separator used between the initial two /// parts of the name. /// \param Separator separator used between all of the rest consecutive /// parts of the name static std::string getNameWithSeparators(ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. /// \param Name Name of the variable. Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name, unsigned AddressSpace = 0); /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// Value *getOMPCriticalRegionLock(StringRef CriticalName); /// Callback type for Atomic Expression update /// ex: /// \code{.cpp} /// unsigned x = 0; /// #pragma omp atomic update /// x = Expr(x_old); //Expr() is any legal operation /// \endcode /// /// \param XOld the value of the atomic memory address to use for update /// \param IRB reference to the IRBuilder to use /// /// \returns Value to update X to. using AtomicUpdateCallbackTy = const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>; private: enum AtomicKind { Read, Write, Update, Capture }; /// Determine whether to emit flush or not /// /// \param Loc The insert and source location description. /// \param AO The required atomic ordering /// \param AK The OpenMP atomic operation kind used. 
/// /// \returns wether a flush was emitted or not bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc, AtomicOrdering AO, AtomicKind AK); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic /// instructions. /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, /// or belong to {FADD, FSUB, BAD_BINOP}. /// Then a `cmpExch` based atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param VolatileX true if \a X volatile? /// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of /// the update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \returns A pair of the old value of X before the update, and the value /// used for the update. std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXLHSInRHSPart); /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 . /// /// \Return The instruction Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2, AtomicRMWInst::BinOp RMWOp); public: /// a struct to pack relevant information while generating atomic Ops struct AtomicOpValue { Value *Var = nullptr; bool IsSigned = false; bool IsVolatile = false; }; /// Emit atomic Read for : V = X --- Only Scalar data types. /// /// \param Loc The insert and source location description. 
/// \param X The target pointer to be atomically read /// \param V Memory address where to store atomically read /// value /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic read IR. InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO); /// Emit atomic write for : X = Expr --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically written to /// \param Expr The value to store. /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic Write IR. InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions. /// \param RMWOp The binary operation used for update. If operation /// is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of /// the update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \return Insertion point after generated atomic update IR. 
InsertPointTy createAtomicUpdate(const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart); /// Emit atomic update for constructs: --- Only Scalar data types /// V = X; X = X BinOp Expr , /// X = X BinOp Expr; V = X, /// V = X; X = Expr BinOp X, /// X = Expr BinOp X; V = X, /// V = X; X = UpdateOp(X), /// X = UpdateOp(X); V = X, /// /// \param Loc The insert and source location description. /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param V Memory address where to store captured value /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param UpdateExpr true if X is an in place update of the form /// X = X BinOp Expr or X = Expr BinOp X /// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. /// /// \return Insertion point after generated atomic capture IR. InsertPointTy createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart); /// Create the control flow structure of a canonical OpenMP loop. /// /// The emitted loop will be disconnected, i.e. 
no edge to the loop's /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's /// IRBuilder location is not preserved. /// /// \param DL DebugLoc used for the instructions in the skeleton. /// \param TripCount Value to be used for the trip count. /// \param F Function in which to insert the BasicBlocks. /// \param PreInsertBefore Where to insert BBs that execute before the body, /// typically the body itself. /// \param PostInsertBefore Where to insert BBs that execute after the body. /// \param Name Base name used to derive BB /// and instruction names. /// /// \returns The CanonicalLoopInfo that represents the emitted loop. CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name = {}); }; /// Class to represented the control flow structure of an OpenMP canonical loop. /// /// The control-flow structure is standardized for easy consumption by /// directives associated with loops. For instance, the worksharing-loop /// construct may change this control flow such that each loop iteration is /// executed on only one thread. The constraints of a canonical loop in brief /// are: /// /// * The number of loop iterations must have been computed before entering the /// loop. /// /// * Has an (unsigned) logical induction variable that starts at zero and /// increments by one. /// /// * The loop's CFG itself has no side-effects. The OpenMP specification /// itself allows side-effects, but the order in which they happen, including /// how often or whether at all, is unspecified. We expect that the frontend /// will emit those side-effect instructions somewhere (e.g. before the loop) /// such that the CanonicalLoopInfo itself can be side-effect free. /// /// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated /// execution of a loop body that satifies these constraints. 
It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///           |
///  /-> Header
///  |     |
///  |    Cond---\
///  |     |     |
///  |    Body   |
///  |    | |    |
///  |   <...>   |
///  |    | |    |
///   \--Latch   |
///              |
///             Exit
///              |
///            After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to the Latch having no PHINode and the Header's only PHINode
/// being for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK().
They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of the
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
/// /// Defined outside OpenMPIRBuilder because nested classes cannot be /// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h. class CanonicalLoopInfo { friend class OpenMPIRBuilder; private: BasicBlock *Preheader = nullptr; BasicBlock *Header = nullptr; BasicBlock *Cond = nullptr; BasicBlock *Body = nullptr; BasicBlock *Latch = nullptr; BasicBlock *Exit = nullptr; BasicBlock *After = nullptr; /// Add the control blocks of this loop to \p BBs. /// /// This does not include any block from the body, including the one returned /// by getBody(). /// /// FIXME: This currently includes the Preheader and After blocks even though /// their content is (mostly) not under CanonicalLoopInfo's control. /// Re-evaluated whether this makes sense. void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs); public: /// Returns whether this object currently represents the IR of a loop. If /// returning false, it may have been consumed by a loop transformation or not /// been intialized. Do not use in this case; bool isValid() const { return Header; } /// The preheader ensures that there is only a single edge entering the loop. /// Code that must be execute before any loop iteration can be emitted here, /// such as computing the loop trip count and begin lifetime markers. Code in /// the preheader is not considered part of the canonical loop. BasicBlock *getPreheader() const { assert(isValid() && "Requires a valid canonical loop"); return Preheader; } /// The header is the entry for each iteration. In the canonical control flow, /// it only contains the PHINode for the induction variable. BasicBlock *getHeader() const { assert(isValid() && "Requires a valid canonical loop"); return Header; } /// The condition block computes whether there is another loop iteration. If /// yes, branches to the body; otherwise to the exit block. 
BasicBlock *getCond() const { assert(isValid() && "Requires a valid canonical loop"); return Cond; } /// The body block is the single entry for a loop iteration and not controlled /// by CanonicalLoopInfo. It can contain arbitrary control flow but must /// eventually branch to the \p Latch block. BasicBlock *getBody() const { assert(isValid() && "Requires a valid canonical loop"); return Body; } /// Reaching the latch indicates the end of the loop body code. In the /// canonical control flow, it only contains the increment of the induction /// variable. BasicBlock *getLatch() const { assert(isValid() && "Requires a valid canonical loop"); return Latch; } /// Reaching the exit indicates no more iterations are being executed. BasicBlock *getExit() const { assert(isValid() && "Requires a valid canonical loop"); return Exit; } /// The after block is intended for clean-up code such as lifetime end /// markers. It is separate from the exit block to ensure, analogous to the /// preheader, it having just a single entry edge and being free from PHI /// nodes should there be multiple loop exits (such as from break /// statements/cancellations). BasicBlock *getAfter() const { assert(isValid() && "Requires a valid canonical loop"); return After; } /// Returns the llvm::Value containing the number of loop iterations. It must /// be valid in the preheader and always interpreted as an unsigned integer of /// any bit-width. Value *getTripCount() const { assert(isValid() && "Requires a valid canonical loop"); Instruction *CmpI = &Cond->front(); assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount"); return CmpI->getOperand(1); } /// Returns the instruction representing the current logical induction /// variable. Always unsigned, always starting at 0 with an increment of one. 
Instruction *getIndVar() const { assert(isValid() && "Requires a valid canonical loop"); Instruction *IndVarPHI = &Header->front(); assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI"); return IndVarPHI; } /// Return the type of the induction variable (and the trip count). Type *getIndVarType() const { assert(isValid() && "Requires a valid canonical loop"); return getIndVar()->getType(); } /// Return the insertion point for user code before the loop. OpenMPIRBuilder::InsertPointTy getPreheaderIP() const { assert(isValid() && "Requires a valid canonical loop"); return {Preheader, std::prev(Preheader->end())}; }; /// Return the insertion point for user code in the body. OpenMPIRBuilder::InsertPointTy getBodyIP() const { assert(isValid() && "Requires a valid canonical loop"); return {Body, Body->begin()}; }; /// Return the insertion point for user code after the loop. OpenMPIRBuilder::InsertPointTy getAfterIP() const { assert(isValid() && "Requires a valid canonical loop"); return {After, After->begin()}; }; Function *getFunction() const { assert(isValid() && "Requires a valid canonical loop"); return Header->getParent(); } /// Consistency self-check. void assertOK() const; /// Invalidate this loop. That is, the underlying IR does not fulfill the /// requirements of an OpenMP canonical loop anymore. void invalidate(); }; } // end namespace llvm #endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
GB_binop__iseq_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__iseq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__iseq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__iseq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__iseq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__iseq_fp64)
// A*D function (colscale):         GB (_AxD__iseq_fp64)
// D*A function (rowscale):         GB (_DxB__iseq_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__iseq_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__iseq_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__iseq_fp64)
// C=scalar+B                       GB (_bind1st__iseq_fp64)
// C=scalar+B'                      GB (_bind1st_tran__iseq_fp64)
// C=A+scalar                       GB (_bind2nd__iseq_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__iseq_fp64)

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = (aij == bij)

// The macros below parameterize the shared *_template.c files, which are
// textually #include'd into each kernel body.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: the ISEQ operator, z = (x == y), computed in double
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags come from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_FP64 || GxB_NO_ISEQ_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// This kernel does not exist for ISEQ:
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are declared here and freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__iseq_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__iseq_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__iseq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
complex_pk.c
#include <stdlib.h> #include <string.h> #include <limits.h> #include <math.h> #include "wgrib2.h" // #define DEBUG // // public domain 7/2009 Wesley Ebisuzaki // // return number of bits for an unsigned int // #define LEN_SEC_MAX 1023 // #define WIDTH_BITS 10 // #define LEN_SEC_MAX 255 // #define LEN_SEC_MAX 102300000 // #define LEN_SEC_MAX 1023 // #define LEN_BITS 10 // #define LEN_SEC_MAX 511 // #define LEN_BITS 9 ///// #define LEN_SEC_MAX 255 ///// #define LEN_BITS 8 // #define LEN_SEC_MAX 127 // #define LEN_BITS 7 // #define LEN_SEC_MAX 63 // #define LEN_BITS 6 static int find_nbits(unsigned int i) { #if !defined __GNUC__ || __GNUC__ < 4 int j; j = 0; while (i > 65535) { i = i >> 16; j += 16; } // i = 16 bits if (i > 255) { i = i >> 8; j += 8; } // i = 8 bits if (i > 15) { i = i >> 4; j += 4; } // i = 4 bits if (i > 3) { i = i >> 2; j += 2; } // i = 2 bits return (i >= 2) ? j + 2 : j + i; #else return (i == 0) ? 0 : 8 * sizeof(unsigned int) - __builtin_clz(i); #endif } struct section { int mn, mx, missing; // stats int i0, i1; // pointers to data[] struct section *head, *tail; }; static int sizeofsection(struct section *s, int ref_bits, int width_bits, int has_undef) { if (s->mn == INT_MAX) return ref_bits + width_bits; // all undef if (s->mn == s->mx) { if (s->missing == 0) return ref_bits + width_bits; return (s->i1-s->i0+1)*has_undef + ref_bits + width_bits; } return find_nbits(s->mx-s->mn + has_undef)*(s->i1-s->i0+1) + ref_bits + width_bits; } static int sizeofsection2(int mn, int mx, int n, int ref_bits, int width_bits, int has_undef_sec, int has_undef) { if (mn == INT_MAX) return ref_bits + width_bits; if (mn == mx) { if (has_undef_sec == 0) return ref_bits + width_bits; return n*has_undef + ref_bits + width_bits; } return find_nbits(mx-mn + has_undef)*n + ref_bits + width_bits; } static int size_all(struct section *s, int ref_bits, int width_bits, int has_undef) { int bits; bits = 0; while (s) { bits += sizeofsection(s, ref_bits, width_bits, has_undef); 
s = s->tail; } return (bits+7)/8; } static void move_one_left(struct section *s, int *v) { struct section *t; int val, i, j, k; t = s->tail; s->i1 += 1; t->i0 += 1; val = v[s->i1]; // update s statistics if (val == INT_MAX) s->missing = 1; else { s->mx = s->mx > val ? s->mx : val; s->mn = s->mn < val ? s->mn : val; } // remove t? if (t->i0 > t->i1) { s->tail = t->tail; t = s->tail; if (t) t->head = s; return; } // update s statistics if (val == INT_MAX) { for (i = t->i0; i <= t->i1; i++) { if (v[i] == INT_MAX) return; } t->missing = 0; return; } if (val == t->mx) { k = INT_MAX; for (j = 0, i = t->i0; i <= t->i1; i++) { if (v[i] != INT_MAX) { if (j == 0) { k = v[i]; j++; } else k = k < v[i] ? v[i] : k; } } t->mx = k; return; } if (val == t->mn) { k = INT_MAX; for (j = 0, i = t->i0; i <= t->i1; i++) { if (v[i] != INT_MAX) { if (j == 0) { k = v[i]; j++; } else k = k > v[i] ? v[i] : k; } } t->mn = k; return; } return; } static void move_one_right(struct section *s, int *v) { struct section *t; int val, i, j, k; t = s->tail; s->i1 -= 1; t->i0 -= 1; val = v[t->i0]; // update t statistics if (val == INT_MAX) t->missing = 1; else { t->mx = t->mx > val ? t->mx : val; t->mn = t->mn < val ? t->mn : val; } // if s is empty, copy t to s and recalculate if (s->i0 > s->i1) { s->i0 = t->i0; s->i1 = t->i1; s->tail = t->tail; s->mx = s->mn = INT_MAX; j = s->missing = 0; for (i = s->i0; i <= s->i1; i++) { if (v[i] == INT_MAX) s->missing = 1; else if (j == 0) { s->mx = s->mn = v[i]; j++; } else { s->mx = s->mx > v[i] ? s->mx : v[i]; s->mn = s->mn < v[i] ? s->mx : v[i]; } } return; } // update s statistics if (val == INT_MAX) { for (i = s->i0; i <= s->i1; i++) { if (v[i] == INT_MAX) return; } s->missing = 0; return; } if (val == s->mx) { k = INT_MAX; for (j = 0, i = s->i0; i <= s->i1; i++) { if (v[i] != INT_MAX) { if (j == 0) { k = v[i]; j++; } else k = k < v[i] ? 
v[i] : k; } } s->mx = k; return; } if (val == s->mn) { k = INT_MAX; for (j = 0, i = s->i0; i <= s->i1; i++) { if (v[i] != INT_MAX) { if (j == 0) { k = v[i]; j++; } else k = k > v[i] ? v[i] : k; } } s->mn = k; return; } return; } static void exchange(struct section *s, int *v, int has_undef, int LEN_SEC_MAX) { struct section *t; int val0, val1, nbit_s, nbit_t; if (s == NULL) return; while ((t = s->tail) != NULL) { // nbit_s = find_nbits(s->mx - s->mn + has_undef); // nbit_t = find_nbits(t->mx - t->mn + has_undef); if (s->mn == INT_MAX) nbit_s = 0; else if (s->mn == s->mx) nbit_s = s->missing; else nbit_s = find_nbits(s->mx - s->mn + has_undef); if (t->mn == INT_MAX) nbit_t = 0; else if (t->mn == t->mx) nbit_t = t->missing; else nbit_t = find_nbits(t->mx - t->mn + has_undef); if (nbit_s == nbit_t) { s = t; continue; } val0 = v[s->i1]; val1 = v[t->i0]; if (s->missing == 1 || t->missing == 1) { s=t; continue; } // if (val0 == INT_MAX || val1 == INT_MAX) { s=t; continue; } if (nbit_s < nbit_t && val1 == INT_MAX) { if ((s->i1-s->i0) < LEN_SEC_MAX && s->mx != s->mn) move_one_left(s, v); else s = t; continue; } if (nbit_s > nbit_t && val0 == INT_MAX) { if ((t->i1-t->i0) < LEN_SEC_MAX && t->mn != t->mx) { move_one_right(s, v); } else s = t; continue; } // if (s->missing == 1 || t->missing == 1) { s=t; continue; } // 3/2014 val0 = v[s->i1]; // 3/2014 val1 = v[t->i0]; if (nbit_s < nbit_t && (s->i1-s->i0) < LEN_SEC_MAX && val1 >= s->mn && val1 <= s->mx) { move_one_left(s, v); } else if (nbit_s > nbit_t && (t->i1-t->i0) < LEN_SEC_MAX && val0 >= t->mn && val0 <= t->mx) { move_one_right(s, v); } else s = s->tail; } } static void merge_j(struct section *h, int ref_bits, int width_bits, int has_undef, int param, int LEN_SEC_MAX) { struct section *t, *m; int size_head, size_mid, size_tail, saving_mt, saving_hm; int min0, max0, min1, max1; size_head = size_mid = size_tail = 0; while (h && (m = h->tail) ) { t = m->tail; // h -> m -> t // find savings of merged h - m saving_hm = -1; 
min0 = max0 = min1 = max1 = 0; // turn off error warnings if (m->i1 - h->i0 < LEN_SEC_MAX) { if (m->mn == INT_MAX) { max0 = h->mx; min0 = h->mn; } else if (h->mn == INT_MAX) { max0 = m->mx; min0 = m->mn; } else { min0 = h->mn < m->mn ? h->mn : m->mn; max0 = h->mx > m->mx ? h->mx : m->mx; } if (max0-min0 <= param) { if (size_head == 0) size_head = sizeofsection(h, ref_bits, width_bits, has_undef); if (size_mid == 0) size_mid = sizeofsection(m, ref_bits, width_bits, has_undef); saving_hm = size_head + size_mid - sizeofsection2(min0, max0, m->i1-h->i0+1, ref_bits, width_bits, h->missing || m->missing , has_undef); } } // find savings of merged m-t saving_mt = -1; if (t && t->i1 - m->i0 < LEN_SEC_MAX) { if (m->mn == INT_MAX) { max1 = t->mx; min1 = t->mn; } else if (t->mn == INT_MAX) { max1 = m->mx; min1 = m->mn; } else { min1 = m->mn < t->mn ? m->mn : t->mn; max1 = m->mx > t->mx ? m->mx : t->mx; } if (max1-min1 <= param) { if (size_mid == 0) size_mid = sizeofsection(m, ref_bits, width_bits, has_undef); if (size_tail == 0) size_tail = sizeofsection(t, ref_bits, width_bits, has_undef); saving_mt = size_mid + size_tail - sizeofsection2(min1, max1, t->i1-m->i0+1, ref_bits, width_bits, m->missing || t->missing, has_undef); } } if (saving_hm >= saving_mt && saving_hm >= 0) { // merge h and m h->i1 = m->i1; h->tail = m->tail; h->mn = min0; h->mx = max0; h->missing = h->missing || m->missing; m = h->tail; if (m) m->head = h; if (h->head) h = h->head; size_head = size_mid = size_tail = 0; } else if (saving_mt >= saving_hm && saving_mt >= 0) { // merge m and t m->i1 = t->i1; m->tail = t->tail; m->mn = min1; m->mx = max1; m->missing = m->missing || t->missing; t = m->tail; if (t) t->head = m; size_head = size_mid = size_tail = 0; } else { // no merging h = h->tail; size_head = size_mid; size_mid = size_tail; size_tail = 0; } } } /* * writes out a complex packed grib message */ int complex_grib_out(unsigned char **sec, float *data, unsigned int ndata, int use_scale, int dec_scale, 
int bin_scale, int wanted_bits, int max_bits, int packing_mode, int use_bitmap, struct seq_file *out) { int j, j0, k, *v, binary_scale, nbits, has_undef, extra_0, extra_1; unsigned int i, ii; int vmn, vmx, vbits; unsigned char *sec0, *sec1, *sec2 , *sec3, *sec4, *sec5, *sec6, *sec7; double max_val, min_val, ref, frange, dec_factor, scale; float mn, mx; struct section start, *list, *list_backup, *s; int ngroups, grefmx, glenmn, glenmx, gwidmn, gwidmx, len_last; int size_sec7; int *refs, *lens, *widths, *itmp, *itmp2; // int est_group_width = 12; int est_group_width = 6; unsigned int ndef, nndata, nstruct; int LEN_SEC_MAX = 127; int LEN_BITS = 7; ndef = 0; #pragma omp parallel for private(i) reduction(+:ndef) for (i = 0; i < ndata; i++) { if (DEFINED_VAL(data[i])) ndef = ndef + 1; } /* required passed sections */ sec0 = sec[0]; sec1 = sec[1]; sec2 = sec[2]; sec3 = sec[3]; sec4 = sec[4]; if (ndef == 0) { // all undefined values sec5 = (unsigned char *) malloc(47 * sizeof(unsigned char)); if (sec5 == NULL) fatal_error("complex_grib_out memory allocation sec5",""); uint_char(47, sec5); sec5[4] = 5; // section 5 uint_char(ndata, sec5+5); // number of points uint2_char(2,sec5+9); // data template 2 flt2ieee((float) 0.0,sec5+11); // reference value int2_char(0,sec5+15); // binary scaling int2_char(0,sec5+17); // decimal scaling sec5[19] = 8; // num bits for packed val sec5[20] = 0; // original = float sec5[21] = 1; // general group splitting sec5[22] = 1; // primary missing values flt2ieee((float) 9.999e20,sec5+23); // missing value sec5[27] = sec5[28] = sec5[29] = sec5[30] = 255; // secondary missing value uint_char(1,sec5+31); // one group sec5[35] = 0; // group width reference sec5[36] = 8; // group width bits uint_char(ndata,sec5+37); // group length ref sec5[41] = 1; // inc uint_char(ndata,sec5+42); // len of last group sec5[46] = 8; // group length width // no bitmap is used sec6 = (unsigned char *) malloc(6 * sizeof(unsigned char)); if (sec6 == NULL) 
fatal_error("complex_grib_out memory allocation sec6",""); uint_char(6, sec6); // size of sec 6 sec6[4] = 6; // section 6 sec6[5] = 255; // section 6 - no bitmap sec7 = (unsigned char *) malloc(8); if (sec7 == NULL) fatal_error("complex_grib_out memory allocation sec7",""); uint_char(8, sec7); // size of section sec7[4] = 7; // section 7 sec7[5] = 255; // group reference sec7[6] = 0; // group width sec7[7] = 0; // group length k = wrt_sec(sec0, sec1, sec2, sec3, sec4, sec5, sec6, sec7, out); free(sec5); free(sec6); free(sec7); return k; } /* compute bitmap section */ if (use_bitmap == 0 || ndef == ndata) { // no bitmap is used sec6 = (unsigned char *) malloc(6 * sizeof(unsigned char)); if (sec6 == NULL) fatal_error("complex_bitmap_grib_out memory allocation sec6",""); uint_char(6, sec6); // size of sec 6 sec6[4] = 6; // section 6 sec6[5] = 255; // no bitmap } else { i = ndata; sec6 = mk_bms(data, &i); if (i != ndef) fatal_error("complex_grib_out prog error 1",""); } /* if bitmap is used: data[0..ndef-1] has grid point values, no undefined values if bitmap is not used: data[0..ndata] has grid point values, possible undefined values */ nndata = use_bitmap ? ndef : ndata; has_undef = use_bitmap ? 0 : ndata != ndef; v = (int *) malloc( ((size_t) nndata) * sizeof(int)); sec5 = (unsigned char *) malloc(packing_mode == 1 ? 
47 : 49); if (v == NULL || sec5 == NULL) fatal_error("complex_grib_out memory allocation v",""); if (min_max_array(data, ndata, &mn, &mx) != 0) fatal_error("complex_pk: min max error",""); min_val = (double) mn; max_val = (double) mx; // printf("min val %lf max val %lf\n",min_val,max_val); binary_scale = bin_scale; if (use_scale == 0) { // ECMWF style ref = min_val; frange = max_val - ref; dec_scale = 0; if (frange != 0.0) { frexp(frange, &j); binary_scale = j - wanted_bits; nbits = wanted_bits; scale = ldexp(1.0, -binary_scale); frange = floor((max_val-ref)*scale + 0.5); frexp(frange, &j); if (j != nbits) binary_scale++; } else { binary_scale = nbits = 0; scale = 1; } } else { if (dec_scale) { dec_factor = Int_Power(10.0, -dec_scale); min_val *= dec_factor; max_val *= dec_factor; if (has_undef) { #pragma omp parallel for private(i) for (i = 0; i < nndata; i++) { if (DEFINED_VAL(data[i])) data[i] *= dec_factor; } } else { #pragma omp parallel for private(i) for (i = 0; i < nndata; i++) { data[i] *= dec_factor; } } } scale = ldexp(1.0, -binary_scale); // ref = floor(min_val*scale)/scale; ref = min_val; frange = floor( (max_val - ref)*scale + 0.5); frexp(frange, &nbits); if (nbits > max_bits) { binary_scale += (nbits - max_bits); nbits = max_bits; } } if (binary_scale) { scale = ldexp(1.0, -binary_scale); if (has_undef) { #pragma omp parallel for private(i) for (i = 0; i < nndata; i++) { if (DEFINED_VAL(data[i])) { v[i] = floor((data[i] - ref)*scale + 0.5); v[i] = v[i] >= 0 ? v[i] : 0; } else v[i] = INT_MAX; } } else { #pragma omp parallel for private(i) for (i = 0; i < nndata; i++) { v[i] = floor((data[i] - ref)*scale + 0.5); v[i] = v[i] >= 0 ? v[i] : 0; } } } else { scale = 1.0; if (has_undef) { #pragma omp parallel for private(i) for (i = 0; i < nndata; i++) { if (DEFINED_VAL(data[i])) { v[i] = floor(data[i] - ref + 0.5); v[i] = v[i] >= 0 ? 
v[i] : 0; } else v[i] = INT_MAX; } } else { #pragma omp parallel for private(i) for (i = 0; i < nndata; i++) { v[i] = floor(data[i] - ref + 0.5); v[i] = v[i] >= 0 ? v[i] : 0; } } } // fprintf(stderr, "\n v[i] data \n"); // for (i = 0; i < nndata;i++) { // fprintf(stderr," %d:%d ", i, v[i]); // } // preprocessing // for (i = 0; i < N; i++) v[i] = u[i]; // for (i = 0; i < N; i++) v[i] = u[i] - u[i-1]; // for (i = 0; i < N; i++) v[i] = u[i] - 2*u[i-1] + u[i-2]; vmx = vmn = 0; extra_0 = extra_1 = 0; // turn off warnings if (packing_mode == 3) { // delta_delta(v, nndata, &vmn, &vmx, &extra_0, &extra_1); // single core version { int last, last0, penultimate; for (i = 0; i < nndata; i++) { if (v[i] != INT_MAX) { extra_0 = penultimate = v[i]; v[i++] = 0; break; } } for ( ; i < nndata; i++) { if (v[i] != INT_MAX) { extra_1 = last = v[i]; v[i++] = 0; break; } } for ( ; i < nndata; i++) { if (v[i] != INT_MAX) { last0 = v[i]; v[i] = v[i] - 2*last + penultimate; penultimate = last; last = last0; vmn = vmn > v[i] ? v[i] : vmn; vmx = vmx < v[i] ? v[i] : vmx; } } } } else if (packing_mode == 2) { // delta(v, nndata, &vmn, &vmx, &extra_0); // single core version { int last, last0; for (i = 0; i < nndata; i++) { if (v[i] != INT_MAX) { extra_0 = last = v[i]; v[i++] = 0; break; } } for ( ; i < nndata; i++) { if (v[i] != INT_MAX) { last0 = v[i]; v[i] = v[i] - last; last = last0; vmn = vmn > v[i] ? v[i] : vmn; vmx = vmx < v[i] ? v[i] : vmx; } } } } else if (packing_mode == 1) { // find min/max int_min_max_array(v, nndata, &vmn, &vmx); } //fprintf(stderr , "\n pre process v[i] data extri_0 %d extra_1 %d\n",extra_0, extra_1); //for (i = 0; i < nndata;i++) { //fprintf(stderr," %d:%d ", i, v[i]); //} //fprintf(stderr,"\n"); #ifdef DEBUG printf("2: vmx %d vmn %d nbits %d\n", vmx, vmn, find_nbits(vmx-vmn+has_undef)); #endif #pragma omp parallel for private(i) for (i = 0; i < nndata; i++) { v[i] = (v[i] != INT_MAX) ? 
v[i] - vmn : INT_MAX; } vmx = vmx-vmn; vbits = find_nbits(vmx+has_undef); /* size of merged struct */ ii = 0; nstruct = 1; for (i = 1; i < nndata; i++) { if (((i - ii + 1) > LEN_SEC_MAX) || (v[i] != v[ii])) { nstruct++; ii = i; } } list = (struct section *) malloc((size_t) nstruct * sizeof(struct section)); if (list == NULL) fatal_error("complex_grib_out: memory allocation of list failed",""); // initialize linked list ii = 0; list[0].mn = list[0].mx = v[0]; list[0].missing = (v[0] == INT_MAX); list[0].i0 = list[0].i1 = 0; for (i = 1; i < nndata; i++) { // join last section if ((i - list[ii].i0 < LEN_SEC_MAX) && (v[i] == list[ii].mn)) { list[ii].i1 = i; } // make new section else { ii++; list[ii].mn = list[ii].mx = v[i]; list[ii].missing = (v[i] == INT_MAX); list[ii].i0 = list[ii].i1 = i; } } list[0].head = NULL; list[ii].tail = NULL; start.tail = &list[0]; if (nstruct != ii+1) fatal_error_ii("complex_pk, nstruct=%d wanted %d",nstruct,ii+1); #pragma omp parallel for private(k) for (i = 1; i < nstruct; i++) { list[i].head = &list[i-1]; list[i-1].tail = &list[i]; } // sequence : has_undef == 0 : 2**n - 1 1, 3, 7, .. // sequence : has_undef == 1 : 2**n - 2 0, 2, 6 k = has_undef ? 
2 : 1; while (k < vmx/2) { merge_j(start.tail, vbits, LEN_BITS+est_group_width, has_undef, k, LEN_SEC_MAX); #ifdef DEBUG j = size_all(start.tail, vbits, LEN_BITS+est_group_width,has_undef); printf(" complex start %d %d bytes\n", k, j); #endif k = 2*k + 1 + has_undef; } // try making segment sizes larger // 12/2015 need to segment size less 25 bits, bitstream software limitation list_backup = (struct section *) malloc(((size_t) nstruct) * sizeof(struct section)); if (list_backup == NULL) fatal_error("complex_grib_out: memory allocation of list_backup failed",""); j = size_all(start.tail, vbits, LEN_BITS+est_group_width,has_undef); j0 = j+1; #ifdef DEBUG printf(" complex start inc segments size0 %d segsize %d\n",j,LEN_SEC_MAX); #endif while (j < j0 && LEN_BITS < 25) { j0 = j; LEN_BITS++; LEN_SEC_MAX = LEN_SEC_MAX + LEN_SEC_MAX + 1; memcpy(list_backup,list, nstruct*sizeof(struct section)); merge_j(start.tail, vbits, LEN_BITS+est_group_width, has_undef, k, LEN_SEC_MAX); j = size_all(start.tail, vbits, LEN_BITS+est_group_width,has_undef); #ifdef DEBUG printf(" complex inc segments size size0 %d size1 %d segsize %d LEN_BITS=%d\n",j0,j,LEN_SEC_MAX, LEN_BITS); #endif if (j > j0) { memcpy(list,list_backup,nstruct*sizeof(struct section)); LEN_BITS--; LEN_SEC_MAX = (LEN_SEC_MAX - 1) / 2; } } free(list_backup); exchange(start.tail, v, has_undef, LEN_SEC_MAX); #ifdef DEBUG j = size_all(start.tail, vbits, LEN_BITS+est_group_width,has_undef); printf(" exchange %d bytes\n", j); #endif merge_j(start.tail, vbits, LEN_BITS+est_group_width, has_undef, vmx, LEN_SEC_MAX); #ifdef DEBUG j = size_all(start.tail, vbits, LEN_BITS+est_group_width,has_undef); printf(" complex start %d %d bytes\n", vmx, j); #endif // finished making segments // findout number of bytes for extra info (packing_mode 2/3) if (packing_mode != 1) { // packing modes 2/3 k = vmn >= 0 ? 
find_nbits(vmn)+1 : find_nbits(-vmn)+1; // + 1 work around for NCEP bug j = find_nbits(extra_0) + 1; if (j > k) k = j; if (packing_mode == 3) { // + 1 work around for NCEP bug j = find_nbits(extra_1) + 1; if (j > k) k = j; } sec5[48] = (k+7)/8; // number of bytes for extra and vmn } // scale the linked list s = start.tail; if (s == NULL) fatal_error("complex grib_out: program error 1",""); ngroups = 0; // number of groups while (s) { ngroups++; s = s->tail; } lens = (int *) malloc(((size_t) ngroups) * sizeof(int)); widths = (int *) malloc(((size_t) ngroups) * sizeof(int)); refs = (int *) malloc(((size_t) ngroups) * sizeof(int)); itmp = (int *) malloc(((size_t) ngroups) * sizeof(int)); itmp2 = (int *) malloc(((size_t) ngroups) * sizeof(int)); if (lens == NULL || widths == NULL || refs == NULL || itmp == NULL || itmp2 == NULL) fatal_error("complex grib_out: memory allocation",""); // printf("linked list ngroups=%d\n", ngroups); /* for (i = k = 0, s = start.tail; k < ngroups; k++, s=s->tail) { lens[k] = s->i1 - s->i0 + 1; i += lens[k]; refs[k] = s->mn; if (s->mn == INT_MAX) widths[k] = 0; else if (s->mn == s->mx) widths[k] = s->missing; else widths[k] = find_nbits(s->mx-s->mn+has_undef); } */ /* make vectors so we can OpenMP the loop */ for (i = ii = 0, s = start.tail; ii < ngroups; ii++, s=s->tail) { lens[ii] = s->i1 - s->i0 + 1; i += lens[ii]; refs[ii] = s->mn; itmp[ii] = s->mx; itmp2[ii] = s->missing; } if (i != nndata) fatal_error("complex grib_out: program error 2",""); #pragma omp parallel for private(i) for (i = 0; i < ngroups; i++) { if (refs[i] == INT_MAX) widths[i] = 0; else if (refs[i] == itmp[i]) widths[i] = itmp2[i]; else widths[i] = find_nbits(itmp[i]-refs[i]+has_undef); } // group lengths len_last = lens[ngroups-1]; // length of last segment glenmn = glenmx = lens[0]; gwidmx = gwidmn = widths[0]; grefmx = refs[0] != INT_MAX ? refs[0] : 0; /* for (k = 1; k < ngroups; k++) { glenmx = glenmx >= lens[k] ? glenmx : lens[k]; glenmn = glenmn <= lens[k] ? 
glenmn : lens[k]; gwidmx = gwidmx >= widths[k] ? gwidmx : widths[k]; gwidmn = gwidmn <= widths[k] ? gwidmn : widths[k]; if (refs[k] != INT_MAX && refs[k] > grefmx) grefmx = refs[k]; } */ #pragma omp parallel private(i) { int glenmn_thread, glenmx_thread, gwidmx_thread, gwidmn_thread, grefmx_thread; glenmn_thread = glenmx_thread = lens[0]; gwidmn_thread = gwidmx_thread = widths[0]; grefmx_thread = refs[0] != INT_MAX ? refs[0] : 0; #pragma omp for nowait for (i = 1; i < ngroups; i++) { glenmx_thread = glenmx_thread >= lens[i] ? glenmx_thread : lens[i]; glenmn_thread = glenmn_thread <= lens[i] ? glenmn_thread : lens[i]; gwidmx_thread = gwidmx_thread >= widths[i] ? gwidmx_thread : widths[i]; gwidmn_thread = gwidmn_thread <= widths[i] ? gwidmn_thread : widths[i]; if (refs[i] != INT_MAX && refs[i] > grefmx_thread) grefmx_thread = refs[i]; } #pragma omp critical { glenmx = glenmx >= glenmx_thread ? glenmx : glenmx_thread; glenmn = glenmn <= glenmn_thread ? glenmn : glenmn_thread; gwidmx = gwidmx >= gwidmx_thread ? gwidmx : gwidmx_thread; gwidmn = gwidmn <= gwidmn_thread ? gwidmn : gwidmn_thread; grefmx = grefmx >= grefmx_thread ? grefmx : grefmx_thread; } } sec5[19] = find_nbits(grefmx+has_undef); // sec5 definitions uint_char(packing_mode == 1 ? 
47 : 49, sec5); // size of sec5 sec5[4] = 5; // section 5 uint_char(nndata, sec5+5); // number of points if (packing_mode == 1) uint2_char(2,sec5+9); // data template 2 else uint2_char(3,sec5+9); // data template 3 // same as grid template 5.0 flt2ieee((float) ref,sec5+11); // reference value int2_char(binary_scale,sec5+15); // binary scaling int2_char(-dec_scale,sec5+17); // decimal scaling sec5[20] = 0; // original = float // same as grid template 5.2 sec5[21] = 1; // general group splitting sec5[22] = has_undef; // primary missing values or no missing values flt2ieee((float) 9.999e20,sec5+23); // missing value sec5[27] = sec5[28] = sec5[29] = sec5[30] = 255; // secondary missing value uint_char(ngroups,sec5+31); // one group sec5[35] = gwidmn; // group width reference sec5[36] = find_nbits(gwidmx-gwidmn+has_undef); // group width bits #ifdef DEBUG printf("group widthmn = %d, gwidmx %d, width bits max %d\n", gwidmn, gwidmx, sec5[36]); #endif uint_char(glenmn,sec5+37); // group length ref sec5[41] = 1; // inc uint_char(len_last,sec5+42); // len of last group sec5[46] = find_nbits(glenmx-glenmn); // group length width if (packing_mode == 2) sec5[47] = 1; if (packing_mode == 3) sec5[47] = 2; // calculate the size of the data section // basic size size_sec7 = 5; // extra octets if (packing_mode == 2) size_sec7 += 2*sec5[48]; else if (packing_mode == 3) size_sec7 += 3*sec5[48]; // group reference value size_sec7 += (ngroups * sec5[19] + 7)/8; // group widths size_sec7 += (ngroups * sec5[36] + 7)/8; // group lengths size_sec7 += (ngroups * sec5[46] + 7)/8; // size of packed grid points for (i = k = 0; i < ngroups; i++) { j = lens[i] * widths[i] + k; size_sec7 += (j >> 3); k = (j & 7); } size_sec7 += k ? 1 : 0; #pragma omp parallel for private(i) for (i = 0; i < ngroups; i++) { refs[i] = (refs[i] != INT_MAX) ? 
refs[i] : ONES; itmp[i] = widths[i] - gwidmn; itmp2[i] = lens[i] - glenmn; } sec7 = (unsigned char *) malloc(size_sec7); if (sec7 == NULL) fatal_error("complex_grib_out memory allocation sec7",""); // pack the values into a bitstream init_bitstream(sec7); add_bitstream(size_sec7>>16,16); add_bitstream(size_sec7,16); add_bitstream(7,8); // write extra octets if (packing_mode == 2 || packing_mode == 3) { add_bitstream(extra_0,8*sec5[48]); if (packing_mode == 3) add_bitstream(extra_1,8*sec5[48]); k = vmn; if (k < 0) { k = -vmn | (1 << (8*sec5[48]-1)); } add_bitstream(k,8*sec5[48]); finish_bitstream(); } // write the group reference values add_many_bitstream(refs, ngroups, sec5[19]); finish_bitstream(); // write the group widths add_many_bitstream(itmp, ngroups, sec5[36]); finish_bitstream(); // write the group lengths add_many_bitstream(itmp2, ngroups, sec5[46]); finish_bitstream(); // write the data /* s = start.tail; for (ii = 0; ii < ngroups; ii++, s=s->tail) { // number of bits to pack if (widths[ii]) { // mask = (1 << widths[ii]) - 1; for (j = 0; j < lens[ii]; j++) { v[j+s->i0] = (v[j+s->i0] == INT_MAX) ? ONES : v[j+s->i0] - s->mn; } add_many_bitstream(v+s->i0, lens[ii], widths[ii]); } } */ s = start.tail; for (i = 0; i < ngroups; i++, s=s->tail) { itmp[i] = s->i0; refs[i] = s->mn; } #pragma omp parallel for private(i,j) for (i = 0; i < ngroups; i++) { if (widths[i]) { for (j = 0; j < lens[i]; j++) { v[j+itmp[i]] = (v[j+itmp[i]] == INT_MAX) ? ONES : v[j+itmp[i]] - refs[i]; } } } for (i = 0; i < ngroups; i++) { if (widths[i]) { add_many_bitstream(v+itmp[i], lens[i], widths[i]); } } finish_bitstream(); j = wrt_sec(sec0, sec1, sec2, sec3, sec4, sec5, sec6, sec7, out); free(sec5); free(sec6); free(sec7); free(list); free(v); free(lens); free(widths); free(refs); free(itmp); return j; }
FourierTransform.h
/* Copyright 2016 Kristofer Björnson
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** @package TBTKcalc
 *  @file FourierTransform.h
 *  @brief Fourier transform
 *
 *  @author Kristofer Björnson
 */

#ifndef COM_DAFER45_TBTK_FOURIER_TRANSFORM
#define COM_DAFER45_TBTK_FOURIER_TRANSFORM

#include "TBTK/CArray.h"
#include "TBTK/Index.h"

#include <fftw3.h>

#include <complex>
#include <vector>

namespace TBTK{

/** @brief Fourier transform.
 *
 *  Static interface to FFTW3 for N-dimensional complex Fourier transforms,
 *  plus a reusable Plan type for repeated transforms of the same geometry.
 *
 *  # Example
 *  \snippet FourierTransform/FourierTransform.cpp FourierTransform
 *  ## Output
 *  \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransform1D.png
 *  \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransform2DReal.png
 *  \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransform2DImaginary.png
 *  \image html output/FourierTransform/FourierTransform/figures/FourierTransformFourierTransformWithPlan.png */
class FourierTransform{
public:
	/** Plan for executing the Fourier-transform. Owns a heap-allocated
	 *  fftw_plan created for a fixed pair of input/output buffers, plus a
	 *  normalization factor that is divided out of the output after
	 *  execution. Copying is disabled; moving transfers ownership of the
	 *  fftw_plan. */
	template<typename DataType>
	class Plan{
	public:
		/** Constructor. (Defined out of line, presumably in the
		 *  corresponding .cpp — not visible here.)
		 *
		 *  @param in Input data buffer.
		 *  @param out Output data buffer.
		 *  @param ranges The dimensions of the data.
		 *  @param sign Sign of the exponent in the transform. */
		Plan(
			const CArray<DataType> &in,
			CArray<DataType> &out,
			const std::vector<unsigned int> &ranges,
			int sign
		);

		/** Copy constructor (deleted: a Plan uniquely owns its
		 *  fftw_plan). */
		Plan(const Plan &plan) = delete;

		/** Move constructor. */
		Plan(Plan &&plan);

		/** Destructor. */
		~Plan();

		/** Assignment operator (deleted, matching the deleted copy
		 *  constructor). */
		Plan& operator=(const Plan &plan) = delete;

		/** Move assignment operator. */
		Plan& operator=(Plan &&plan);

		/** Set normalization factor. The executed result is divided
		 *  by this factor when it differs from 1. */
		void setNormalizationFactor(double normalizationFactor);

		/** Get normalizationFactor. */
		double getNormalizationFactor() const;
	private:
		/** FFTW3 plan (heap-allocated; nullptr in moved-from
		 *  objects). */
		fftw_plan *plan;

		/** Normalization factor. */
		double normalizationFactor;

		/** Data size (total number of elements to normalize). */
		unsigned int size;

		/** Input data.
		 *  NOTE(review): reference member — it cannot be rebound
		 *  after construction; see the notes on the move operations
		 *  below. TODO confirm whether a pointer was intended. */
		const CArray<DataType> &input;

		/** Output data. NOTE(review): reference member, same caveat
		 *  as input. */
		CArray<DataType> &output;

		/** Get FFTW3 plan. */
		fftw_plan& getFFTWPlan();

		/** Get data size. */
		unsigned int getSize() const;

		/** Get input data.
		 *  NOTE(review): declared to return a non-const reference
		 *  although `input` is a const reference member — this member
		 *  function is ill-formed if ever instantiated. TODO confirm
		 *  intended constness. */
		CArray<DataType>& getInput();

		/** Get output data. */
		CArray<DataType>& getOutput();

		/** Make FourierTransform a friend class so that the static
		 *  transform() can reach the private accessors. */
		friend class FourierTransform;
	};

	/** Plan for executing forward Fourier-transform (sign = -1). */
	template<typename DataType>
	class ForwardPlan : public Plan<DataType>{
	public:
		/** Constructor.
		 *
		 *  @param in Input data.
		 *  @param out Output data.
		 *  @param ranges The dimensions of the data. */
		ForwardPlan(
			const CArray<DataType> &in,
			CArray<DataType> &out,
			const std::vector<unsigned int> &ranges
		) : Plan<DataType>(in, out, ranges, -1){}
	};

	/** Plan for executing inverse Fourier-transform (sign = +1). */
	template<typename DataType>
	class InversePlan : public Plan<DataType>{
	public:
		/** Constructor.
		 *
		 *  @param in Input data.
		 *  @param out Output data.
		 *  @param ranges The dimensions of the data. */
		InversePlan(
			const CArray<DataType> &in,
			CArray<DataType> &out,
			const std::vector<unsigned int> &ranges
		) : Plan<DataType>(in, out, ranges, 1 ){}
	};

	/** N-dimensional complex Fourier transform.
	 *
	 *  @param in Input data.
	 *  @param out Output data.
	 *  @param ranges The dimensions of the data.
	 *  @param sign The sign to use in the exponent of the Fourier
	 *  transform. */
	static void transform(
		const CArray<std::complex<double>> &in,
		CArray<std::complex<double>> &out,
		const std::vector<unsigned int> &ranges,
		int sign
	);

	/** Execute a planned transform.
	 *
	 *  @param plan The plan to execute. */
	template<typename DataType>
	static void transform(Plan<DataType> &plan);

	/** N-dimensional complex forward Fourier transform.
	 *
	 *  @param in Input data.
	 *  @param out Output data.
	 *  @param ranges The dimensions of the data. */
	static void forward(
		const CArray<std::complex<double>> &in,
		CArray<std::complex<double>> &out,
		const std::vector<unsigned int> &ranges
	);

	/** N-dimensional complex inverse Fourier transform.
	 *
	 *  @param in Input data.
	 *  @param out Output data.
	 *  @param ranges The dimensions of the data. */
	static void inverse(
		const CArray<std::complex<double>> &in,
		CArray<std::complex<double>> &out,
		const std::vector<unsigned int> &ranges
	);

private:
};

template<typename DataType>
inline void FourierTransform::transform(Plan<DataType> &plan){
	//Run the prepared FFTW3 plan, then divide the normalization factor
	//out of the output (skipped entirely when the factor is exactly 1).
	fftw_execute(plan.getFFTWPlan());

	double normalizationFactor = plan.getNormalizationFactor();
	if(normalizationFactor != 1.){
		CArray<DataType> &output = plan.getOutput();
		for(unsigned int n = 0; n < plan.getSize(); n++)
			output[n] /= normalizationFactor;
	}
}

inline void FourierTransform::forward(
	const CArray<std::complex<double>> &in,
	CArray<std::complex<double>> &out,
	const std::vector<unsigned int> &ranges
){
	//Forward transform = negative sign in the exponent.
	transform(in, out, ranges, -1);
}

inline void FourierTransform::inverse(
	const CArray<std::complex<double>> &in,
	CArray<std::complex<double>> &out,
	const std::vector<unsigned int> &ranges
){
	//Inverse transform = positive sign in the exponent.
	transform(in, out, ranges, 1);
}

template<typename DataType>
inline FourierTransform::Plan<DataType>::Plan(Plan &&plan){
	//Steal the fftw_plan pointer; nulling the source keeps its
	//destructor from destroying the transferred plan.
	//NOTE(review): the reference members `input` and `output` are not
	//named in a mem-initializer list, and the assignments below cannot
	//rebind references — this constructor appears ill-formed when
	//instantiated. TODO confirm (members may have been meant to be
	//pointers).
	this->plan = plan.plan;
	plan.plan = nullptr;
	normalizationFactor = plan.normalizationFactor;
	size = plan.size;
	input = plan.input;
	output = plan.output;
}

template<typename DataType>
inline FourierTransform::Plan<DataType>::~Plan(){
	//Only destroy a plan we still own (moved-from objects hold nullptr).
	//The named critical section serializes plan destruction because
	//FFTW3's planner API is not thread-safe.
	if(plan != nullptr){
#pragma omp critical (TBTK_FOURIER_TRANSFORM)
		fftw_destroy_plan(*plan);

		delete plan;
	}
}

template<typename DataType>
inline FourierTransform::Plan<DataType>& FourierTransform::Plan<
	DataType
>::operator=(Plan &&rhs){
	//NOTE(review): the member transfer below happens only when
	//this->plan is non-null, so move-assigning into a moved-from Plan
	//silently transfers nothing; also rhs.plan is never reset to
	//nullptr (unlike in the move constructor), so rhs's destructor will
	//destroy the plan that *this now points at — likely double
	//destruction. Additionally, `input`/`output` are reference members
	//and cannot be reseated by the assignments below. TODO confirm
	//intended semantics before use.
	if(this != &rhs){
		if(this->plan != nullptr){
#pragma omp critical (TBTK_FOURIER_TRANSFORM)
			fftw_destroy_plan(*this->plan);

			delete this->plan;

			this->plan = rhs.plan;
			normalizationFactor = rhs.normalizationFactor;
			size = rhs.size;
			input = rhs.input;
			output = rhs.output;
		}
	}

	return *this;
}

template<typename DataType>
inline void FourierTransform::Plan<DataType>::setNormalizationFactor(
	double normalizationFactor
){
	this->normalizationFactor = normalizationFactor;
}

template<typename DataType>
inline double FourierTransform::Plan<DataType>::getNormalizationFactor() const{
	return normalizationFactor;
}

template<typename DataType>
inline fftw_plan& FourierTransform::Plan<DataType>::getFFTWPlan(){
	return *plan;
}

template<typename DataType>
inline unsigned int FourierTransform::Plan<DataType>::getSize() const{
	return size;
}

template<typename DataType>
inline CArray<DataType>& FourierTransform::Plan<DataType>::getInput(){
	return input;
}

template<typename DataType>
inline CArray<DataType>& FourierTransform::Plan<DataType>::getOutput(){
	return output;
}

};	//End of namespace TBTK

#endif
nvector_openmp.c
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2020, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP implementation * of the NVECTOR module. * -----------------------------------------------------------------*/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_math.h> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x */ static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void VScaleBy_OpenMP(realtype a, N_Vector x); /* x <- ax */ /* 
Private functions for special cases of vector array operations */ static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
 */

/* Returns the vector type identifier for this OpenMP implementation. */
N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
  return SUNDIALS_NVEC_OPENMP;
}

/* ----------------------------------------------------------------------------
 * Function to create a new empty vector: allocates the N_Vector shell and
 * the OpenMP content struct, attaches all operation pointers, but attaches
 * no data array (own_data = SUNFALSE, data = NULL). Returns NULL on any
 * allocation failure.
 */

N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads)
{
  N_Vector v;
  N_VectorContent_OpenMP content;

  /* Create vector */
  v = NULL;
  v = N_VNewEmpty();
  if (v == NULL) return(NULL);

  /* Attach operations */

  /* constructors, destructors, and utility operations */
  v->ops->nvgetvectorid     = N_VGetVectorID_OpenMP;
  v->ops->nvclone           = N_VClone_OpenMP;
  v->ops->nvcloneempty      = N_VCloneEmpty_OpenMP;
  v->ops->nvdestroy         = N_VDestroy_OpenMP;
  v->ops->nvspace           = N_VSpace_OpenMP;
  v->ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
  v->ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
  v->ops->nvgetlength       = N_VGetLength_OpenMP;

  /* standard vector operations */
  v->ops->nvlinearsum    = N_VLinearSum_OpenMP;
  v->ops->nvconst        = N_VConst_OpenMP;
  v->ops->nvprod         = N_VProd_OpenMP;
  v->ops->nvdiv          = N_VDiv_OpenMP;
  v->ops->nvscale        = N_VScale_OpenMP;
  v->ops->nvabs          = N_VAbs_OpenMP;
  v->ops->nvinv          = N_VInv_OpenMP;
  v->ops->nvaddconst     = N_VAddConst_OpenMP;
  v->ops->nvdotprod      = N_VDotProd_OpenMP;
  v->ops->nvmaxnorm      = N_VMaxNorm_OpenMP;
  v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
  v->ops->nvwrmsnorm     = N_VWrmsNorm_OpenMP;
  v->ops->nvmin          = N_VMin_OpenMP;
  v->ops->nvwl2norm      = N_VWL2Norm_OpenMP;
  v->ops->nvl1norm       = N_VL1Norm_OpenMP;
  v->ops->nvcompare      = N_VCompare_OpenMP;
  v->ops->nvinvtest      = N_VInvTest_OpenMP;
  v->ops->nvconstrmask   = N_VConstrMask_OpenMP;
  v->ops->nvminquotient  = N_VMinQuotient_OpenMP;

  /* fused and vector array operations are disabled (NULL) by default */

  /* local reduction kernels: in a shared-memory vector the "local" kernels
     coincide with the global ones, so they reuse the same functions */
  v->ops->nvdotprodlocal     = N_VDotProd_OpenMP;
  v->ops->nvmaxnormlocal     = N_VMaxNorm_OpenMP;
  v->ops->nvminlocal         = N_VMin_OpenMP;
  v->ops->nvl1normlocal      = N_VL1Norm_OpenMP;
  v->ops->nvinvtestlocal     = N_VInvTest_OpenMP;
  v->ops->nvconstrmasklocal  = N_VConstrMask_OpenMP;
  v->ops->nvminquotientlocal = N_VMinQuotient_OpenMP;
  v->ops->nvwsqrsumlocal     = N_VWSqrSumLocal_OpenMP;
  v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMP;

  /* XBraid interface operations */
  v->ops->nvbufsize   = N_VBufSize_OpenMP;
  v->ops->nvbufpack   = N_VBufPack_OpenMP;
  v->ops->nvbufunpack = N_VBufUnpack_OpenMP;

  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMP) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }

  /* Attach content */
  v->content = content;

  /* Initialize content */
  content->length      = length;
  content->num_threads = num_threads;
  content->own_data    = SUNFALSE;
  content->data        = NULL;

  return(v);
}

/* ----------------------------------------------------------------------------
 * Function to create a new vector: empty shell plus an owned data array of
 * `length` realtype entries (no array is allocated when length == 0).
 */

N_Vector N_VNew_OpenMP(sunindextype length, int num_threads)
{
  N_Vector v;
  realtype *data;

  v = NULL;
  v = N_VNewEmpty_OpenMP(length, num_threads);
  if (v == NULL) return(NULL);

  /* Create data */
  if (length > 0) {

    /* Allocate memory */
    data = NULL;
    data = (realtype *) malloc(length * sizeof(realtype));
    if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); }

    /* Attach data */
    NV_OWN_DATA_OMP(v) = SUNTRUE;
    NV_DATA_OMP(v)     = data;

  }

  return(v);
}

/* ----------------------------------------------------------------------------
 * Function to create a vector with user data component: the caller keeps
 * ownership of v_data (own_data = SUNFALSE), so N_VDestroy will not free it.
 */

N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads)
{
  N_Vector v;

  v = NULL;
  v = N_VNewEmpty_OpenMP(length, num_threads);
  if (v == NULL) return(NULL);

  if (length > 0) {
    /* Attach data */
    NV_OWN_DATA_OMP(v) = SUNFALSE;
    NV_DATA_OMP(v)     = v_data;
  }

  return(v);
}

/* ----------------------------------------------------------------------------
 * Function to create an array of new vectors.
*/ N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w) { N_Vector* vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector*) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VClone_OpenMP(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMP(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w) { N_Vector* vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector*) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VCloneEmpty_OpenMP(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMP(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMP */ void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count) { int j; for (j = 0; j < count; j++) N_VDestroy_OpenMP(vs[j]); free(vs); vs = NULL; return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_OpenMP(N_Vector v) { return NV_LENGTH_OMP(v); } /* ---------------------------------------------------------------------------- * Function to print a vector to stdout */ void N_VPrint_OpenMP(N_Vector x) { N_VPrintFile_OpenMP(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print a vector to outfile */ void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%11.8Lg\n", xd[i]); #elif 
defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%11.8g\n", xd[i]); #else fprintf(outfile, "%11.8g\n", xd[i]); #endif } fprintf(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Create new vector from existing vector without attaching data */ N_Vector N_VCloneEmpty_OpenMP(N_Vector w) { N_Vector v; N_VectorContent_OpenMP content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_OpenMP) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = NV_LENGTH_OMP(w); content->num_threads = NV_NUM_THREADS_OMP(w); content->own_data = SUNFALSE; content->data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Create new vector from existing vector and attach data */ N_Vector N_VClone_OpenMP(N_Vector w) { N_Vector v; realtype *data; sunindextype length; v = NULL; v = N_VCloneEmpty_OpenMP(w); if (v == NULL) return(NULL); length = NV_LENGTH_OMP(w); /* Create data */ if (length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); } /* Attach data */ NV_OWN_DATA_OMP(v) = SUNTRUE; NV_DATA_OMP(v) = data; } return(v); } /* ---------------------------------------------------------------------------- * Destroy vector and free vector memory */ void N_VDestroy_OpenMP(N_Vector v) { if (v == NULL) return; /* free content */ if (v->content != NULL) { /* free data array if it's owned by the vector */ if 
(NV_OWN_DATA_OMP(v) && NV_DATA_OMP(v) != NULL) { free(NV_DATA_OMP(v)); NV_DATA_OMP(v) = NULL; } free(v->content); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } /* ---------------------------------------------------------------------------- * Get storage requirement for N_Vector */ void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw) { *lrw = NV_LENGTH_OMP(v); *liw = 1; return; } /* ---------------------------------------------------------------------------- * Get vector data pointer */ realtype *N_VGetArrayPointer_OpenMP(N_Vector v) { return((realtype *) NV_DATA_OMP(v)); } /* ---------------------------------------------------------------------------- * Set vector data pointer */ void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v) { if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data; return; } /* ---------------------------------------------------------------------------- * Compute linear combination z[i] = a*x[i]+b*y[i] */ void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i, N; realtype c, *xd, *yd, *zd; N_Vector v1, v2; booleantype test; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */ Vaxpy_OpenMP(a,x,y); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ Vaxpy_OpenMP(b,y,x); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_OpenMP(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? x : y; VDiff_OpenMP(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? 
y : x; v2 = test ? x : y; VLin1_OpenMP(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin2_OpenMP(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_OpenMP(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_OpenMP(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = (a*xd[i])+(b*yd[i]); return; } /* ---------------------------------------------------------------------------- * Assigns constant value to all vector elements, z[i] = c */ void N_VConst_OpenMP(realtype c, N_Vector z) { sunindextype i, N; realtype *zd; i = 0; /* initialize to suppress clang warning */ zd = NULL; N = NV_LENGTH_OMP(z); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(z)) for (i = 0; i < N; i++) zd[i] = c; return; } /* ---------------------------------------------------------------------------- * Compute componentwise product z[i] = x[i]*y[i] */ void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]*yd[i]; 
return; } /* ---------------------------------------------------------------------------- * Compute componentwise division z[i] = x[i]/y[i] */ void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]/yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaler multiplication z[i] = c*x[i] */ void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_OpenMP(c, x); return; } if (c == ONE) { VCopy_OpenMP(x, z); } else if (c == -ONE) { VNeg_OpenMP(x, z); } else { N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = c*xd[i]; } return; } /* ---------------------------------------------------------------------------- * Compute absolute value of vector components z[i] = SUNRabs(x[i]) */ void N_VAbs_OpenMP(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = SUNRabs(xd[i]); return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = 1 / x[i] */ void N_VInv_OpenMP(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* 
initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = ONE/xd[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b */ void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]+b; return; } /* ---------------------------------------------------------------------------- * Computes the dot product of two vectors, a = sum(x[i]*y[i]) */ realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y) { sunindextype i, N; realtype sum, *xd, *yd; i = 0; /* initialize to suppress clang warning */ sum = ZERO; xd = yd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); #pragma omp parallel for default(none) private(i) shared(N,xd,yd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { sum += xd[i]*yd[i]; } return(sum); } /* ---------------------------------------------------------------------------- * Computes max norm of a vector */ realtype N_VMaxNorm_OpenMP(N_Vector x) { sunindextype i, N; realtype tmax, max, *xd; i = 0; /* initialize to suppress clang warning */ max = ZERO; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); #pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \ num_threads(NV_NUM_THREADS_OMP(x)) { tmax = ZERO; #pragma omp for schedule(static) for (i = 0; i < N; i++) { if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]); } #pragma omp 
critical { if (tmax > max) max = tmax; } } return(max); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a vector */ realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w) { return(SUNRsqrt(N_VWSqrSumLocal_OpenMP(x, w)/(NV_LENGTH_OMP(x)))); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a masked vector */ realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id) { return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMP(x, w, id)/(NV_LENGTH_OMP(x)))); } /* ---------------------------------------------------------------------------- * Finds the minimun component of a vector */ realtype N_VMin_OpenMP(N_Vector x) { sunindextype i, N; realtype min, *xd; realtype tmin; i = 0; /* initialize to suppress clang warning */ xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); min = xd[0]; #pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \ num_threads(NV_NUM_THREADS_OMP(x)) { tmin = xd[0]; #pragma omp for schedule(static) for (i = 1; i < N; i++) { if (xd[i] < tmin) tmin = xd[i]; } if (tmin < min) { #pragma omp critical { if (tmin < min) min = tmin; } } } return(min); } /* ---------------------------------------------------------------------------- * Computes weighted L2 norm of a vector */ realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, *xd, *wd; i = 0; /* initialize to suppress clang warning */ sum = ZERO; xd = wd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); wd = NV_DATA_OMP(w); #pragma omp parallel for default(none) private(i) shared(N,xd,wd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { sum += SUNSQR(xd[i]*wd[i]); } return(SUNRsqrt(sum)); } /* ---------------------------------------------------------------------------- * Computes L1 norm of a vector */ realtype N_VL1Norm_OpenMP(N_Vector x) { sunindextype i, N; 
realtype sum, *xd; i = 0; /* initialize to suppress clang warning */ sum = ZERO; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); #pragma omp parallel for default(none) private(i) shared(N,xd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i<N; i++) sum += SUNRabs(xd[i]); return(sum); } /* ---------------------------------------------------------------------------- * Compare vector component values to a scaler */ void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO; } return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO */ booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd, val; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); val = ZERO; #pragma omp parallel for default(none) private(i) shared(N,val,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { if (xd[i] == ZERO) val = ONE; else zd[i] = ONE/xd[i]; } if (val > ZERO) return (SUNFALSE); else return (SUNTRUE); } /* ---------------------------------------------------------------------------- * Compute constraint mask of a vector */ booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m) { sunindextype i, N; realtype temp; realtype *cd, *xd, *md; booleantype test; i = 0; /* initialize to suppress clang warning */ cd = xd = md = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); cd = NV_DATA_OMP(c); md = NV_DATA_OMP(m); temp = ZERO; 
#pragma omp parallel for default(none) private(i,test) shared(N,xd,cd,md,temp) \ schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { md[i] = ZERO; /* Continue if no constraints were set for the variable */ if (cd[i] == ZERO) continue; /* Check if a set constraint has been violated */ test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) || (SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO); if (test) { temp = md[i] = ONE; /* Here is a race to write to temp */ } } /* Return false if any constraint was violated */ return (temp == ONE) ? SUNFALSE : SUNTRUE; } /* ---------------------------------------------------------------------------- * Compute minimum componentwise quotient */ realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom) { sunindextype i, N; realtype *nd, *dd, min, tmin, val; i = 0; /* initialize to suppress clang warning */ nd = dd = NULL; N = NV_LENGTH_OMP(num); nd = NV_DATA_OMP(num); dd = NV_DATA_OMP(denom); min = BIG_REAL; #pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \ num_threads(NV_NUM_THREADS_OMP(num)) { tmin = BIG_REAL; #pragma omp for schedule(static) for (i = 0; i < N; i++) { if (dd[i] != ZERO) { val = nd[i]/dd[i]; if (val < tmin) tmin = val; } } if (tmin < min) { #pragma omp critical { if (tmin < min) min = tmin; } } } return(min); } /* ---------------------------------------------------------------------------- * Computes weighted square sum of a vector */ realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, *xd, *wd; i = 0; /* initialize to suppress clang warning */ sum = ZERO; xd = wd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); wd = NV_DATA_OMP(w); #pragma omp parallel for default(none) private(i) shared(N,xd,wd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { sum += SUNSQR(xd[i]*wd[i]); } return(sum); } /* ---------------------------------------------------------------------------- * 
Computes weighted square sum of a masked vector */ realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, *xd, *wd, *idd; i = 0; /* initialize to suppress clang warning */ sum = ZERO; xd = wd = idd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); wd = NV_DATA_OMP(w); idd = NV_DATA_OMP(id); #pragma omp parallel for default(none) private(i) shared(N,xd,wd,idd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { if (idd[i] > ZERO) { sum += SUNSQR(xd[i]*wd[i]); } } return(sum); }
/* NOTE(review): the fused operations below dispatch small nvec cases to the
   basic kernels (N_VScale / N_VLinearSum / N_VDotProd), then use a single
   'omp parallel' region containing one 'omp for' per vector so the thread
   team is created only once per call.  Aliasing (X[0] == z, Y == Z) is
   detected by pointer comparison to select in-place branches. */
/* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i; sunindextype j, N; realtype* zd=NULL; realtype* xd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_OpenMP(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_OpenMP(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMP(z); zd = NV_DATA_OMP(z); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((X[0] == z) && (c[0] == ONE)) { #pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \ num_threads(NV_NUM_THREADS_OMP(z)) { for (i=1; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] += c[i] * xd[j]; } } } return(0); } /* * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1 */ if (X[0] == z) { #pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \ num_threads(NV_NUM_THREADS_OMP(z)) { #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] *= c[0]; } for (i=1; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); #pragma omp for schedule(static) for
(j=0; j<N; j++) { zd[j] += c[i] * xd[j]; } } } return(0); } /* * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1 */ #pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \ num_threads(NV_NUM_THREADS_OMP(z)) { xd = NV_DATA_OMP(X[0]); #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] = c[0] * xd[j]; } for (i=1; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] += c[i] * xd[j]; } } } return(0); } int N_VScaleAddMulti_OpenMP(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VLinearSum */ if (nvec == 1) { N_VLinearSum_OpenMP(a[0], x, ONE, Y[0], Z[0]); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { #pragma omp parallel default(none) private(i,j,yd) shared(nvec,Y,N,a,xd) \ num_threads(NV_NUM_THREADS_OMP(x)) { for (i=0; i<nvec; i++) { yd = NV_DATA_OMP(Y[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { yd[j] += a[i] * xd[j]; } } } return(0); } /* * Z[i][j] = Y[i][j] + a[i] * x[j] */ #pragma omp parallel default(none) private(i,j,yd,zd) shared(nvec,Y,Z,N,a,xd) \ num_threads(NV_NUM_THREADS_OMP(x)) { for (i=0; i<nvec; i++) { yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] = a[i] * xd[j] + yd[j]; } } } return(0); } int N_VDotProdMulti_OpenMP(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods) { int i; sunindextype j, N; realtype sum; realtype* xd=NULL; realtype* yd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VDotProd */ if (nvec == 1) { dotprods[0] = N_VDotProd_OpenMP(x, Y[0]); return(0); } /* get vector length
and data array */ N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); /* initialize dot products */ for (i=0; i<nvec; i++) { dotprods[i] = ZERO; } /* compute multiple dot products */ #pragma omp parallel default(none) private(i,j,yd,sum) shared(nvec,Y,N,xd,dotprods) \ num_threads(NV_NUM_THREADS_OMP(x)) { for (i=0; i<nvec; i++) { yd = NV_DATA_OMP(Y[i]); sum = ZERO; #pragma omp for schedule(static) for (j=0; j<N; j++) { sum += xd[j] * yd[j]; } #pragma omp critical { dotprods[i] += sum; } } } return(0); } /* * ----------------------------------------------------------------- * vector array operations * ----------------------------------------------------------------- */ int N_VLinearSumVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; realtype c; N_Vector* V1; N_Vector* V2; booleantype test; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VLinearSum */ if (nvec == 1) { N_VLinearSum_OpenMP(a, X[0], b, Y[0], Z[0]); return(0); } /* BLAS usage: axpy y <- ax+y */ if ((b == ONE) && (Z == Y)) return(VaxpyVectorArray_OpenMP(nvec, a, X, Y)); /* BLAS usage: axpy x <- by+x */ if ((a == ONE) && (Z == X)) return(VaxpyVectorArray_OpenMP(nvec, b, Y, X)); /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) return(VSumVectorArray_OpenMP(nvec, X, Y, Z)); /* Cases: */ /* (1) a == 1.0, b = -1.0, */ /* (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { V1 = test ? Y : X; V2 = test ? X : Y; return(VDiffVectorArray_OpenMP(nvec, V2, V1, Z)); } /* Cases: */ /* (1) a == 1.0, b == other or 0.0, */ /* (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; V1 = test ? Y : X; V2 = test ?
X : Y; return(VLin1VectorArray_OpenMP(nvec, c, V1, V2, Z)); } /* Cases: */ /* (1) a == -1.0, b != 1.0, */ /* (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; V1 = test ? Y : X; V2 = test ? X : Y; return(VLin2VectorArray_OpenMP(nvec, c, V1, V2, Z)); } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) return(VScaleSumVectorArray_OpenMP(nvec, a, X, Y, Z)); /* Case: a == -b */ if (a == -b) return(VScaleDiffVectorArray_OpenMP(nvec, a, X, Y, Z)); /* Do all cases not handled above: */ /* (1) a == other, b == 0.0 - user should have called N_VScale */ /* (2) a == 0.0, b == other - user should have called N_VScale */ /* (3) a,b == other, a !=b, a != -b */ /* get vector length */ N = NV_LENGTH_OMP(Z[0]); /* compute linear sum for each vector pair in vector arrays */ #pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a,b) \ num_threads(NV_NUM_THREADS_OMP(Z[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] = a * xd[j] + b * yd[j]; } } } return(0); } int N_VScaleVectorArray_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_OpenMP(c[0], X[0], Z[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMP(Z[0]); /* * X[i] *= c[i] */ if (X == Z) { #pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c) \ num_threads(NV_NUM_THREADS_OMP(Z[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { xd[j] *= c[i]; } } } return(0); } /* * Z[i] = c[i] * X[i] */ #pragma omp parallel default(none) private(i,j,xd,zd) shared(nvec,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] = c[i] * xd[j]; } } } return(0); }
/* NOTE(review): in-place operation (X == Z, Y == Z) is detected by comparing
   the vector-array pointers themselves, selecting the update-in-place branch
   above; the norm kernels below accumulate per-thread partial sums and merge
   them into nrm[i] under '#pragma omp critical'. */
int N_VConstVectorArray_OpenMP(int nvec, realtype c, N_Vector* Z) { int i; sunindextype j, N; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VConst */ if (nvec == 1) { N_VConst_OpenMP(c, Z[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMP(Z[0]); /* set each vector in the vector array to a constant */ #pragma omp parallel default(none) private(i,j,zd) shared(nvec,Z,N,c) \ num_threads(NV_NUM_THREADS_OMP(Z[0])) { for (i=0; i<nvec; i++) { zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) { zd[j] = c; } } } return(0); } int N_VWrmsNormVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W, realtype* nrm) { int i; sunindextype j, N; realtype sum; realtype* wd=NULL; realtype* xd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNorm_OpenMP(X[0], W[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMP(X[0]); /* initialize norms */ for (i=0; i<nvec; i++) { nrm[i] = ZERO; } /* compute the WRMS norm for each vector in the vector array */ #pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,nrm) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); wd = NV_DATA_OMP(W[i]); sum = ZERO; #pragma omp for schedule(static) for (j=0; j<N; j++) { sum += SUNSQR(xd[j] * wd[j]); } #pragma omp critical { nrm[i] += sum; } } } for (i=0; i<nvec; i++) { nrm[i] = SUNRsqrt(nrm[i]/N); } return(0); } int N_VWrmsNormMaskVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm) { int i; sunindextype j, N; realtype sum;
realtype* wd=NULL; realtype* xd=NULL; realtype* idd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNormMask_OpenMP(X[0], W[0], id); return(0); } /* get vector length and mask data array */ N = NV_LENGTH_OMP(X[0]); idd = NV_DATA_OMP(id); /* initialize norms */ for (i=0; i<nvec; i++) { nrm[i] = ZERO; } /* compute the WRMS norm for each vector in the vector array */ #pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,idd,nrm) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); wd = NV_DATA_OMP(W[i]); sum = ZERO; #pragma omp for schedule(static) for (j=0; j<N; j++) { if (idd[j] > ZERO) sum += SUNSQR(xd[j] * wd[j]); } #pragma omp critical { nrm[i] += sum; } } } for (i=0; i<nvec; i++) { nrm[i] = SUNRsqrt(nrm[i]/N); } return(0); } int N_VScaleAddMultiVectorArray_OpenMP(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j; sunindextype k, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; int retval; N_Vector* YY; N_Vector* ZZ; i = 0; /* initialize to suppress clang warning */ k = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VLinearSum */ if (nsum == 1) { N_VLinearSum_OpenMP(a[0], X[0], ONE, Y[0][0], Z[0][0]); return(0); } /* should have called N_VScaleAddMulti */ YY = (N_Vector*) malloc(nsum * sizeof(N_Vector)); ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector)); for (j=0; j<nsum; j++) { YY[j] = Y[j][0]; ZZ[j] = Z[j][0]; } retval = N_VScaleAddMulti_OpenMP(nsum, a, X[0], YY, ZZ); free(YY); free(ZZ); return(retval); } /* -------------------------- * Special cases for nvec > 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if
(nsum == 1) { retval = N_VLinearSumVectorArray_OpenMP(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LENGTH_OMP(X[0]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { #pragma omp parallel default(none) private(i,j,k,xd,yd) shared(nvec,nsum,X,Y,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); for (j=0; j<nsum; j++) { yd = NV_DATA_OMP(Y[j][i]); #pragma omp for schedule(static) for (k=0; k<N; k++) { yd[k] += a[j] * xd[k]; } } } } return(0); } /* * Z[i][j] = Y[i][j] + a[i] * x[j] */ #pragma omp parallel default(none) private(i,j,k,xd,yd,zd) shared(nvec,nsum,X,Y,Z,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); for (j=0; j<nsum; j++) { yd = NV_DATA_OMP(Y[j][i]); zd = NV_DATA_OMP(Z[j][i]); #pragma omp for schedule(static) for (k=0; k<N; k++) { zd[k] = a[j] * xd[k] + yd[k]; } } } } return(0); }
/* NOTE(review): the linear-combination driver below reduces small nvec/nsum
   cases to simpler kernels (allocating temporary N_Vector*/realtype arrays
   where the argument layouts differ), then handles the general case with
   one parallel region and in-place branches chosen by pointer comparison. */
int N_VLinearCombinationVectorArray_OpenMP(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { int i; /* vector arrays index in summation [0,nsum) */ int j; /* vector index in vector array [0,nvec) */ sunindextype k; /* element index in vector [0,N) */ sunindextype N; realtype* zd=NULL; realtype* xd=NULL; realtype* ctmp; N_Vector* Y; i = 0; /* initialize to suppress clang warning */ k = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VScale */ if (nsum == 1) { N_VScale_OpenMP(c[0], X[0][0], Z[0]); return(0); } /* should have called N_VLinearSum */ if (nsum == 2) { N_VLinearSum_OpenMP(c[0], X[0][0], c[1], X[1][0], Z[0]); return(0); } /* should have called N_VLinearCombination */ Y = (N_Vector*) malloc(nsum * sizeof(N_Vector)); for (i=0; i<nsum; i++) { Y[i] = X[i][0]; }
N_VLinearCombination_OpenMP(nsum, c, Y, Z[0]); free(Y); return(0); } /* -------------------------- * Special cases for nvec > 1 * -------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; j<nvec; j++) { ctmp[j] = c[0]; } N_VScaleVectorArray_OpenMP(nvec, ctmp, X[0], Z); free(ctmp); return(0); } /* should have called N_VLinearSumVectorArray */ if (nsum == 2) { N_VLinearSumVectorArray_OpenMP(nvec, c[0], X[0], c[1], X[1], Z); return(0); } /* -------------------------- * Compute linear combination * -------------------------- */ /* get vector length */ N = NV_LENGTH_OMP(Z[0]); /* * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1 */ if ((X[0] == Z) && (c[0] == ONE)) { #pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \ num_threads(NV_NUM_THREADS_OMP(Z[0])) { for (j=0; j<nvec; j++) { zd = NV_DATA_OMP(Z[j]); for (i=1; i<nsum; i++) { xd = NV_DATA_OMP(X[i][j]); #pragma omp for schedule(static) for (k=0; k<N; k++) { zd[k] += c[i] * xd[k]; } } } } return(0); } /* * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1 */ if (X[0] == Z) { #pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \ num_threads(NV_NUM_THREADS_OMP(Z[0])) { for (j=0; j<nvec; j++) { zd = NV_DATA_OMP(Z[j]); #pragma omp for schedule(static) for (k=0; k<N; k++) { zd[k] *= c[0]; } for (i=1; i<nsum; i++) { xd = NV_DATA_OMP(X[i][j]); #pragma omp for schedule(static) for (k=0; k<N; k++) { zd[k] += c[i] * xd[k]; } } } } return(0); } /* * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1 */ #pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \ num_threads(NV_NUM_THREADS_OMP(Z[0])) { for (j=0; j<nvec; j++) { /* scale first vector in the sum into the output vector */ xd = NV_DATA_OMP(X[0][j]); zd = NV_DATA_OMP(Z[j]); #pragma omp for schedule(static) for (k=0; k<N; k++) { zd[k] = c[0] * xd[k]; } /* scale and sum remaining
vectors into the output vector */ for (i=1; i<nsum; i++) { xd = NV_DATA_OMP(X[i][j]); #pragma omp for schedule(static) for (k=0; k<N; k++) { zd[k] += c[i] * xd[k]; } } } } return(0); } /* * ----------------------------------------------------------------- * OPTIONAL XBraid interface operations * ----------------------------------------------------------------- */ int N_VBufSize_OpenMP(N_Vector x, sunindextype *size) { if (x == NULL) return(-1); *size = NV_LENGTH_OMP(x) * ((sunindextype)sizeof(realtype)); return(0); } int N_VBufPack_OpenMP(N_Vector x, void *buf) { sunindextype i, N; realtype *xd = NULL; realtype *bd = NULL; if (x == NULL || buf == NULL) return(-1); N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); bd = (realtype*) buf; #pragma omp for schedule(static) for (i = 0; i < N; i++) bd[i] = xd[i]; return(0); } int N_VBufUnpack_OpenMP(N_Vector x, void *buf) { sunindextype i, N; realtype *xd = NULL; realtype *bd = NULL; if (x == NULL || buf == NULL) return(-1); N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); bd = (realtype*) buf; #pragma omp for schedule(static) for (i = 0; i < N; i++) xd[i] = bd[i]; return(0); } /* * ----------------------------------------------------------------- * private functions for special cases of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Copy vector components into a second vector */ static void VCopy_OpenMP(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector sum */ static void VSum_OpenMP(N_Vector x, N_Vector y, 
N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]+yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector difference */ static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]-yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute the negative of a vector */ static void VNeg_OpenMP(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = -xd[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaled vector sum */ static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] =
c*(xd[i]+yd[i]); return; } /* ---------------------------------------------------------------------------- * Compute scaled vector difference */ static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = c*(xd[i]-yd[i]); return; } /* ---------------------------------------------------------------------------- * Compute vector sum z[i] = a*x[i]+y[i] */ static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = (a*xd[i])+yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector difference z[i] = a*x[i]-y[i] */ static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = (a*xd[i])-yd[i]; return; }
/* NOTE(review): Vaxpy below special-cases a == 1 and a == -1 with
   add/subtract-only loops to avoid the multiply in the update. */
/* ---------------------------------------------------------------------------- * Compute special cases of linear sum */ static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y) { sunindextype i, N; realtype *xd, *yd;
i = 0; /* initialize to suppress clang warning */ xd = yd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); if (a == ONE) { #pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) yd[i] += xd[i]; return; } if (a == -ONE) { #pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) yd[i] -= xd[i]; return; } #pragma omp parallel for default(none) private(i) shared(N,a,xd,yd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) yd[i] += a*xd[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaled vector x[i] = a*x[i] */ static void VScaleBy_OpenMP(realtype a, N_Vector x) { sunindextype i, N; realtype *xd; i = 0; /* initialize to suppress clang warning */ xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); #pragma omp parallel for default(none) private(i) shared(N,a,xd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) xd[i] *= a; return; } /* * ----------------------------------------------------------------- * private functions for special cases of vector array operations * ----------------------------------------------------------------- */ static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; N = NV_LENGTH_OMP(X[0]); #pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) zd[j] = xd[j] + yd[j]; } } return(0); } static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y,
N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; N = NV_LENGTH_OMP(X[0]); #pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) zd[j] = xd[j] - yd[j]; } } return(0); } static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; N = NV_LENGTH_OMP(X[0]); #pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,c) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) zd[j] = c * (xd[j] + yd[j]); } } return(0); } static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; N = NV_LENGTH_OMP(X[0]); #pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,c) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) zd[j] = c * (xd[j] - yd[j]); } } return(0); } static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; N = NV_LENGTH_OMP(X[0]); #pragma omp parallel default(none) private(i,j,xd,yd,zd)
shared(nvec,X,Y,Z,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) zd[j] = (a * xd[j]) + yd[j]; } } return(0); } static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; N = NV_LENGTH_OMP(X[0]); #pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); zd = NV_DATA_OMP(Z[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) zd[j] = (a * xd[j]) - yd[j]; } } return(0); }
/* NOTE(review): as in the single-vector Vaxpy, the a == 1 and a == -1 cases
   below are handled with add/subtract-only loops. */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y) { int i; sunindextype j, N; realtype* xd=NULL; realtype* yd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; N = NV_LENGTH_OMP(X[0]); if (a == ONE) { #pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) yd[j] += xd[j]; } } return(0); } if (a == -ONE) { #pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) yd[j] -= xd[j]; } } return(0); } #pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i<nvec; i++) { xd = NV_DATA_OMP(X[i]); yd = NV_DATA_OMP(Y[i]); #pragma omp for schedule(static) for (j=0; j<N; j++) yd[j] += a * xd[j]; } } return(0); } /* *
----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */
/* NOTE(review): each enable/disable helper below validates v and v->ops,
   then installs or clears the corresponding function pointer; all return 0
   on success and -1 on a NULL argument. */
int N_VEnableFusedOps_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_OpenMP; v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMP; v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMP; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMP; v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMP; v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMP; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMP; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMP; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMP; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMP; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_OpenMP; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int
N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMP; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMP; else v->ops->nvdotprodmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMP; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMP; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMP; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /*
check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMP; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMP; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMP; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMP; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); }
fmm_base.h
#ifndef fmm_base_h #define fmm_base_h #include <algorithm> // std::fill #include "exafmm_t.h" #include "geometry.h" #include "timer.h" namespace exafmm_t { //! Base FMM class template <typename T> class FmmBase { public: int p; //!< Order of expansion int nsurf; //!< Number of points on equivalent / check surface int nconv; //!< Number of points on convolution grid int nfreq; //!< Number of coefficients in DFT (depending on whether T is real_t) int ncrit; //!< Max number of bodies per leaf int depth; //!< Depth of the tree real_t r0; //!< Half of the side length of the bounding box vec3 x0; //!< Coordinates of the center of root box bool is_precomputed; //!< Whether the matrix file is found bool is_real; //!< Whether template parameter T is real_t std::string filename; //!< File name of the precomputation matrices FmmBase() {} FmmBase(int p_, int ncrit_, std::string filename_=std::string()) : p(p_), ncrit(ncrit_), filename(filename_) { nsurf = 6*(p_-1)*(p_-1) + 2; int n1 = 2 * p_; nconv = n1 * n1 * n1; is_real = std::is_same<T, real_t>::value; nfreq = is_real ? n1*n1*(n1/2+1) : nconv; is_precomputed = false; } virtual void potential_P2P(RealVec& src_coord, std::vector<T>& src_value, RealVec& trg_coord, std::vector<T>& trg_value) = 0; virtual void gradient_P2P(RealVec& src_coord, std::vector<T>& src_value, RealVec& trg_coord, std::vector<T>& trg_value) = 0; //! M2L operator. virtual void M2L(Nodes<T>& nodes) = 0; //! M2M operator. virtual void M2M(Node<T>* node) = 0; //! L2L operator. virtual void L2L(Node<T>* node) = 0; //! P2M operator. virtual void P2M(NodePtrs<T>& leafs) = 0; //! L2P operator. virtual void L2P(NodePtrs<T>& leafs) = 0; /** * @brief Compute the kernel matrix of a given kernel. * * @param src_coord Vector of source coordinates. * @param trg_coord Vector of target coordinates. * @param matrix Kernel matrix. 
*/ void kernel_matrix(RealVec& src_coord, RealVec& trg_coord, std::vector<T>& matrix) { std::vector<T> src_value(1, 1.); // use unit weight to generate kernel matrix int nsrcs = src_coord.size() / 3; int ntrgs = trg_coord.size() / 3; #pragma omp parallel for for (int i=0; i<nsrcs; i++) { RealVec src_coord_(src_coord.data()+3*i, src_coord.data()+3*(i+1)); std::vector<T> trg_value_(ntrgs, 0.); potential_P2P(src_coord_, src_value, trg_coord, trg_value_); std::copy(trg_value_.begin(), trg_value_.end(), matrix.data()+i*ntrgs); } } /* the following kernels do not use precomputation matrices * thus can be defined in the base class */ //! P2P operator. void P2P(NodePtrs<T>& leafs) { NodePtrs<T>& targets = leafs; #pragma omp parallel for for (size_t i=0; i<targets.size(); i++) { Node<T>* target = targets[i]; NodePtrs<T>& sources = target->P2P_list; for (size_t j=0; j<sources.size(); j++) { Node<T>* source = sources[j]; gradient_P2P(source->src_coord, source->src_value, target->trg_coord, target->trg_value); } } } //! M2P operator. void M2P(NodePtrs<T>& leafs) { NodePtrs<T>& targets = leafs; real_t c[3] = {0.0}; std::vector<RealVec> up_equiv_surf; up_equiv_surf.resize(depth+1); for (int level=0; level<=depth; level++) { up_equiv_surf[level].resize(nsurf*3); up_equiv_surf[level] = surface(p, r0, level, c, 1.05); } #pragma omp parallel for for (size_t i=0; i<targets.size(); i++) { Node<T>* target = targets[i]; NodePtrs<T>& sources = target->M2P_list; for (size_t j=0; j<sources.size(); j++) { Node<T>* source = sources[j]; RealVec src_equiv_coord(nsurf*3); int level = source->level; // source node's equiv coord = relative equiv coord + node's center for (int k=0; k<nsurf; k++) { src_equiv_coord[3*k+0] = up_equiv_surf[level][3*k+0] + source->x[0]; src_equiv_coord[3*k+1] = up_equiv_surf[level][3*k+1] + source->x[1]; src_equiv_coord[3*k+2] = up_equiv_surf[level][3*k+2] + source->x[2]; } gradient_P2P(src_equiv_coord, source->up_equiv, target->trg_coord, target->trg_value); } } } //! 
P2L operator. void P2L(Nodes<T>& nodes) { Nodes<T>& targets = nodes; real_t c[3] = {0.0}; std::vector<RealVec> dn_check_surf; dn_check_surf.resize(depth+1); for (int level=0; level<=depth; level++) { dn_check_surf[level].resize(nsurf*3); dn_check_surf[level] = surface(p, r0, level, c, 1.05); } #pragma omp parallel for for (size_t i=0; i<targets.size(); i++) { Node<T>* target = &targets[i]; NodePtrs<T>& sources = target->P2L_list; for (size_t j=0; j<sources.size(); j++) { Node<T>* source = sources[j]; RealVec trg_check_coord(nsurf*3); int level = target->level; // target node's check coord = relative check coord + node's center for (int k=0; k<nsurf; k++) { trg_check_coord[3*k+0] = dn_check_surf[level][3*k+0] + target->x[0]; trg_check_coord[3*k+1] = dn_check_surf[level][3*k+1] + target->x[1]; trg_check_coord[3*k+2] = dn_check_surf[level][3*k+2] + target->x[2]; } potential_P2P(source->src_coord, source->src_value, trg_check_coord, target->dn_equiv); } } } /** * @brief Evaluate upward equivalent charges for all nodes in a post-order traversal. * * @param nodes Vector of all nodes. * @param leafs Vector of pointers to leaf nodes. */ void upward_pass(Nodes<T>& nodes, NodePtrs<T>& leafs, bool verbose=true) { start("P2M"); P2M(leafs); stop("P2M", verbose); start("M2M"); #pragma omp parallel #pragma omp single nowait M2M(&nodes[0]); stop("M2M", verbose); } /** * @brief Evaluate potentials and gradients for all targets in a pre-order traversal. * * @param nodes Vector of all nodes. * @param leafs Vector of pointers to leaf nodes. 
*/ void downward_pass(Nodes<T>& nodes, NodePtrs<T>& leafs, bool verbose=true) { start("P2L"); P2L(nodes); stop("P2L", verbose); start("M2P"); M2P(leafs); stop("M2P", verbose); start("P2P"); P2P(leafs); stop("P2P", verbose); start("M2L"); M2L(nodes); stop("M2L", verbose); start("L2L"); #pragma omp parallel #pragma omp single nowait L2L(&nodes[0]); stop("L2L", verbose); start("L2P"); L2P(leafs); stop("L2P", verbose); } /** * @brief Check FMM accuracy. * * @param leafs Vector of leaves. * @return The relative error of potential and gradient in L2 norm. */ RealVec verify(NodePtrs<T>& leafs, bool sample=false) { Nodes<T> targets; // vector of target nodes if (sample) { int nsamples = 10; int stride = leafs.size() / nsamples; for (int i=0; i<nsamples; i++) { targets.push_back(*(leafs[i*stride])); } } else { // compute all values directly without sampling for (size_t i=0; i<leafs.size(); i++) { targets.push_back(*leafs[i]); } } Nodes<T> targets2 = targets; // target2 is used for direct summation #pragma omp parallel for for (size_t i=0; i<targets2.size(); i++) { Node<T>* target = &targets2[i]; std::fill(target->trg_value.begin(), target->trg_value.end(), 0.); for (size_t j=0; j<leafs.size(); j++) { gradient_P2P(leafs[j]->src_coord, leafs[j]->src_value, target->trg_coord, target->trg_value); } } // relative error in L-infinity norm /* double potential_max_err = -1; double gradient_max_err = -1; for (size_t i=0; i<targets.size(); i++) { for (int j=0; j<targets[i].ntrgs; j++) { auto potential_diff = std::abs(targets[i].trg_value[4*j+0] - targets2[i].trg_value[4*j+0]); auto potential_norm = std::abs(targets2[i].trg_value[4*j+0]); potential_max_err = std::max(potential_max_err, potential_diff/potential_norm); for (int d=1; d<4; d++) { auto gradient_diff = std::abs(targets[i].trg_value[4*j+d] - targets2[i].trg_value[4*j+d]); auto gradient_norm = std::abs(targets2[i].trg_value[4*j+d]); gradient_max_err = std::max(gradient_max_err, gradient_diff/gradient_norm); } } } */ // 
relative error in L2 norm double p_diff = 0, p_norm = 0, g_diff = 0, g_norm = 0; for (size_t i=0; i<targets.size(); i++) { for (int j=0; j<targets[i].ntrgs; j++) { p_norm += std::norm(targets2[i].trg_value[4*j+0]); p_diff += std::norm(targets2[i].trg_value[4*j+0] - targets[i].trg_value[4*j+0]); for (int d=1; d<4; d++) { g_diff += std::norm(targets2[i].trg_value[4*j+d] - targets[i].trg_value[4*j+d]); g_norm += std::norm(targets2[i].trg_value[4*j+d]); } } } RealVec err(2); err[0] = sqrt(p_diff/p_norm); // potential error in L2 norm err[1] = sqrt(g_diff/g_norm); // gradient error in L2 norm return err; } }; } // end namespace #endif
convolution_sgemm_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD void im2col_sgemm_int8_neon_asimddp(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); void convolution_im2col_sgemm_transform_kernel_int8_neon_asimddp(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h); #endif static void im2col_sgemm_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { #if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD if (ncnn::cpu_support_arm_asimddp()) { im2col_sgemm_int8_neon_asimddp(bottom_im2col, top_blob, kernel, opt); return; } #endif // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __ARM_NEON #if __aarch64__ #if __ARM_FEATURE_DOTPROD if (inch >= 8) { if (size >= 16) tmp.create(16 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 8 + (size % 8) / 4 + 
(size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator); } else if (inch >= 4) { if (size >= 16) tmp.create(16 * maxk, inch / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); } else { if (size >= 16) tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 1, opt.workspace_allocator); } #else // __ARM_FEATURE_DOTPROD if (inch >= 8) { if (size >= 4) tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, 
opt.workspace_allocator); else tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator); } else if (inch >= 4) { if (size >= 4) tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); } else { if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); } #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ if (inch >= 8) { if (size >= 2) tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator); } else if (inch >= 4) { if (size >= 2) tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator); else tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator); } else { if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); } #endif // __aarch64__ { #if __aarch64__ #if __ARM_FEATURE_DOTPROD int nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char* tmpptr = tmp.channel(i / 16); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed 
char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "ld1 {v4.16b}, [%4] \n" "ld1 {v5.16b}, [%5] \n" "ld1 {v6.16b}, [%6] \n" "ld1 {v7.16b}, [%7] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n" "st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%4], #64 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(tmpptr) // %4 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed 
char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "ld1 {v4.8b}, [%4] \n" "ld1 {v5.8b}, [%5] \n" "ld1 {v6.8b}, [%6] \n" "ld1 {v7.8b}, [%7] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n" "st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 
4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(tmpptr) // %4 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else // __ARM_FEATURE_DOTPROD int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif // __ARM_FEATURE_DOTPROD #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char* tmpptr = tmp.channel(i / 4); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) 
+ i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img4[2]; tmpptr[1] = img5[2]; tmpptr[2] = img6[2]; tmpptr[3] = img7[2]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img4[2]; tmpptr[5] = img5[2]; tmpptr[6] = img6[2]; tmpptr[7] = img7[2]; tmpptr += 8; tmpptr[0] = img0[3]; tmpptr[1] = img1[3]; tmpptr[2] = img2[3]; tmpptr[3] = img3[3]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed 
char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char* tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const 
signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += 
nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q + 3 < inch; q += 4) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += size; img1 += size; img2 += size; img3 += size; } } for (; q < inch; q++) { const signed char* img0 = (const 
signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #else // __ARM_NEON tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); { #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < size; i++) { signed char* tmpptr = tmp.channel(i); int q = 0; for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #endif // __ARM_NEON int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p + 1); int* outptr2 = top_blob.channel(p + 2); int* outptr3 = top_blob.channel(p + 3); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "cmp %w4, #0 \n" "beq 1f \n" "ld1 {v8.16b}, [%8], #16 \n" // _w0123_l "ld1 {v0.16b}, [%7], #16 \n" // _val0123_l "0: \n" "ld1 {v1.16b}, [%7], #16 \n" // _val4567_l "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" 
"sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "ld1 {v2.16b}, [%7], #16 \n" // _val891011_l "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot v23.4s, v8.16b, v1.4b[3] \n" "ld1 {v3.16b}, [%7], #16 \n" // _val12131415_l "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" "ld1 {v9.16b}, [%8], #16 \n" // _w0123_h "sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "ld1 {v4.16b}, [%7], #16 \n" // _val0123_h "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "ld1 {v5.16b}, [%7], #16 \n" // _val4567_h "sdot v16.4s, v9.16b, v4.4b[0] \n" "sdot v17.4s, v9.16b, v4.4b[1] \n" "sdot v18.4s, v9.16b, v4.4b[2] \n" "sdot v19.4s, v9.16b, v4.4b[3] \n" "ld1 {v6.16b}, [%7], #16 \n" // _val891011_h "sdot v20.4s, v9.16b, v5.4b[0] \n" "sdot v21.4s, v9.16b, v5.4b[1] \n" "sdot v22.4s, v9.16b, v5.4b[2] \n" "sdot v23.4s, v9.16b, v5.4b[3] \n" "ld1 {v7.16b}, [%7], #16 \n" // _val12131415_h "sdot v24.4s, v9.16b, v6.4b[0] \n" "sdot v25.4s, v9.16b, v6.4b[1] \n" "ld1 {v8.16b}, [%8], #16 \n" // _w0123_l "sdot v26.4s, v9.16b, v6.4b[2] \n" "sdot v27.4s, v9.16b, v6.4b[3] \n" "ld1 {v0.16b}, [%7], #16 \n" // _val0123_l "sdot v28.4s, v9.16b, v7.4b[0] \n" "sdot v29.4s, v9.16b, v7.4b[1] \n" "subs %w4, %w4, #1 \n" "sdot v30.4s, v9.16b, v7.4b[2] \n" "sdot v31.4s, v9.16b, v7.4b[3] \n" "bne 0b \n" "sub %7, %7, #16 \n" "sub %8, %8, #16 \n" "1: \n" "cmp %w5, #0 \n" "beq 3f \n" "2: \n" "ld1 {v8.16b}, [%8], #16 \n" "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%7], #64 \n" "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot v23.4s, v8.16b, v1.4b[3] \n" "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" 
"sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "subs %w5, %w5, #1 \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "bne 2b \n" "3: \n" "lsr w4, %w6, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v8.8b, v9.8b}, [%8], #16 \n" "ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%7], #64 \n" "uzp1 v10.8b, v8.8b, v9.8b \n" "uzp2 v11.8b, v8.8b, v9.8b \n" "uzp1 v4.16b, v0.16b, v1.16b \n" "uzp2 v5.16b, v0.16b, v1.16b \n" "uzp1 v6.16b, v2.16b, v3.16b \n" "uzp2 v7.16b, v2.16b, v3.16b \n" "uzp1 v8.8b, v10.8b, v11.8b \n" "uzp2 v9.8b, v10.8b, v11.8b \n" "uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5 "uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d "mov v8.d[1], v9.d[0] \n" // _w "uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7 "uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v2.4b[0] \n" "sdot v19.4s, v8.16b, v2.4b[1] \n" "sdot v20.4s, v8.16b, v0.4b[2] \n" "sdot v21.4s, v8.16b, v0.4b[3] \n" "sdot v22.4s, v8.16b, v2.4b[2] \n" "sdot v23.4s, v8.16b, v2.4b[3] \n" "sdot v24.4s, v8.16b, v1.4b[0] \n" "sdot v25.4s, v8.16b, v1.4b[1] \n" "sdot v26.4s, v8.16b, v3.4b[0] \n" "sdot v27.4s, v8.16b, v3.4b[1] \n" "sdot v28.4s, v8.16b, v1.4b[2] \n" "sdot v29.4s, v8.16b, v1.4b[3] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" "and w4, %w6, #3 \n" // w4 = remain = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "ld1 {v1.8b}, [%8] \n" "ld1 {v0.16b}, [%7] \n" "sshll v1.8h, v1.8b, #0 \n" "sshll v2.8h, v0.8b, #0 \n" "sshll2 v3.8h, v0.16b, #0 \n" "smlal v16.4s, v1.4h, v2.h[0] \n" "smlal v17.4s, v1.4h, v2.h[1] \n" "smlal v18.4s, v1.4h, v2.h[2] \n" "smlal v19.4s, v1.4h, v2.h[3] \n" "smlal v20.4s, v1.4h, v2.h[4] \n" "smlal v21.4s, v1.4h, v2.h[5] \n" "smlal v22.4s, v1.4h, v2.h[6] \n" "smlal v23.4s, v1.4h, v2.h[7] \n" "smlal v24.4s, 
v1.4h, v3.h[0] \n" "smlal v25.4s, v1.4h, v3.h[1] \n" "smlal v26.4s, v1.4h, v3.h[2] \n" "smlal v27.4s, v1.4h, v3.h[3] \n" "smlal v28.4s, v1.4h, v3.h[4] \n" "smlal v29.4s, v1.4h, v3.h[5] \n" "smlal v30.4s, v1.4h, v3.h[6] \n" "smlal v31.4s, v1.4h, v3.h[7] \n" "add %7, %7, #16 \n" "add %8, %8, #4 \n" "subs w4, w4, #1 \n" "bne 6b \n" "7: \n" // transpose 4x16 "trn1 v0.4s, v16.4s, v17.4s \n" "trn2 v1.4s, v16.4s, v17.4s \n" "trn1 v2.4s, v18.4s, v19.4s \n" "trn2 v3.4s, v18.4s, v19.4s \n" "trn1 v4.4s, v20.4s, v21.4s \n" "trn2 v5.4s, v20.4s, v21.4s \n" "trn1 v6.4s, v22.4s, v23.4s \n" "trn2 v7.4s, v22.4s, v23.4s \n" "trn1 v8.4s, v24.4s, v25.4s \n" "trn2 v9.4s, v24.4s, v25.4s \n" "trn1 v10.4s, v26.4s, v27.4s \n" "trn2 v11.4s, v26.4s, v27.4s \n" "trn1 v12.4s, v28.4s, v29.4s \n" "trn2 v13.4s, v28.4s, v29.4s \n" "trn1 v14.4s, v30.4s, v31.4s \n" "trn2 v15.4s, v30.4s, v31.4s \n" "trn1 v16.2d, v0.2d, v2.2d \n" "trn2 v24.2d, v0.2d, v2.2d \n" "trn1 v20.2d, v1.2d, v3.2d \n" "trn2 v28.2d, v1.2d, v3.2d \n" "trn1 v17.2d, v4.2d, v6.2d \n" "trn2 v25.2d, v4.2d, v6.2d \n" "trn1 v21.2d, v5.2d, v7.2d \n" "trn2 v29.2d, v5.2d, v7.2d \n" "trn1 v18.2d, v8.2d, v10.2d \n" "trn2 v26.2d, v8.2d, v10.2d \n" "trn1 v22.2d, v9.2d, v11.2d \n" "trn2 v30.2d, v9.2d, v11.2d \n" "trn1 v19.2d, v12.2d, v14.2d \n" "trn2 v27.2d, v12.2d, v14.2d \n" "trn1 v23.2d, v13.2d, v15.2d \n" "trn2 v31.2d, v13.2d, v15.2d \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%3], #64 \n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn4), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn4), "6"(nn1), "7"(tmpptr), "8"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", 
"v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } for (int j = 0; j < nn4; j++) { int8x16_t _val0123 = vld1q_s8(tmpptr); int8x16_t _val4567 = vld1q_s8(tmpptr + 16); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0, 
_val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0, _val4567, 3); tmpptr += 32; kptr0 += 16; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x8x4_t _val4 = vld4_s8(tmpptr); int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]); int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]); int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]); int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3); tmpptr += 32; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _val4 = vdup_n_s16(tmpptr[4]); int16x4_t _val5 = vdup_n_s16(tmpptr[5]); int16x4_t _val6 = vdup_n_s16(tmpptr[6]); int16x4_t _val7 = vdup_n_s16(tmpptr[7]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = 
vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); _sum4 = vmlal_s16(_sum4, _val4, _w0123); _sum5 = vmlal_s16(_sum5, _val5, _w0123); _sum6 = vmlal_s16(_sum6, _val6, _w0123); _sum7 = vmlal_s16(_sum7, _val7, _w0123); tmpptr += 8; kptr0 += 4; } // transpose 4x8 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5); int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); _sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0])); _sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), vget_low_s32(_s67.val[1])); _sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0])); _sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); vst1q_s32(outptr0 + 4, _sum4); vst1q_s32(outptr1 + 4, _sum5); vst1q_s32(outptr2 + 4, _sum6); vst1q_s32(outptr3 + 4, _sum7); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t 
_w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } for (int j = 0; j < nn4; j++) { int8x16_t _val0123 = vld1q_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0, _val0123, 3); tmpptr += 16; kptr0 += 16; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x16_t _val = vld1q_s8(tmpptr); int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val)); int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]); int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3); tmpptr += 16; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], 
_w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); tmpptr += 4; kptr0 += 4; } // transpose 4x4 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #else // __ARM_FEATURE_DOTPROD asm volatile( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "cmp %w4, #0 \n" "beq 3f \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%7, #128] \n" "prfm pldl1keep, [%8, #256] \n" "lsr w4, %w4, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%8, #512] \n" "add x5, %7, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%7] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%8], #64 \n" "add %7, %7, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%7] \n" "add %7, %7, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%8, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%8, #512] \n" 
"smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%7] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %7, %7, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%7] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %7, %7, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%8], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%7, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%7, #384] \n" 
"sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %7, %7, #64 \n" "sub %8, %8, #64 \n" "1: \n" "and w4, %w4, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%7], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%8], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%7], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "3: \n" "cmp %w5, #0 \n" "beq 7f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" 
"eor v15.16b, v15.16b, v15.16b \n" "lsr w4, %w5, #1 \n" // w4 = nn4 >> 1 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v16.8b, v17.8b}, [%7], #16 \n" "ld1 {v22.8b, v23.8b}, [%8], #16 \n" "zip1 v18.2s, v16.2s, v16.2s \n" // _val00 "zip2 v19.2s, v16.2s, v16.2s \n" // _val11 "smull v24.8h, v18.8b, v22.8b \n" "smull v25.8h, v18.8b, v23.8b \n" "zip1 v20.2s, v17.2s, v17.2s \n" // _val22 "smull v26.8h, v19.8b, v22.8b \n" "smull v27.8h, v19.8b, v23.8b \n" "zip2 v21.2s, v17.2s, v17.2s \n" // _val33 "smull v28.8h, v20.8b, v22.8b \n" "smull v29.8h, v20.8b, v23.8b \n" "ld1 {v16.8b, v17.8b}, [%7], #16 \n" "smull v30.8h, v21.8b, v22.8b \n" "smull v31.8h, v21.8b, v23.8b \n" "ld1 {v22.8b, v23.8b}, [%8], #16 \n" "zip1 v18.2s, v16.2s, v16.2s \n" // _val44 "zip2 v19.2s, v16.2s, v16.2s \n" // _val55 "smlal v24.8h, v18.8b, v22.8b \n" "smlal v25.8h, v18.8b, v23.8b \n" "zip1 v20.2s, v17.2s, v17.2s \n" // _val66 "smlal v26.8h, v19.8b, v22.8b \n" "smlal v27.8h, v19.8b, v23.8b \n" "zip2 v21.2s, v17.2s, v17.2s \n" // _val77 "sadalp v8.4s, v24.8h \n" "smlal v28.8h, v20.8b, v22.8b \n" "sadalp v9.4s, v25.8h \n" "smlal v29.8h, v20.8b, v23.8b \n" "sadalp v10.4s, v26.8h \n" "smlal v30.8h, v21.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal v31.8h, v21.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "subs w4, w4, #1 \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "bne 4b \n" "5: \n" "and w4, %w5, #1 \n" // w4 = remain = nn4 & 1 "cmp w4, #0 \n" // w4 > 0 "beq 6f \n" "ld1 {v16.8b, v17.8b}, [%7], #16 \n" "ld1 {v22.8b, v23.8b}, [%8], #16 \n" "zip1 v18.2s, v16.2s, v16.2s \n" // _val00 "zip2 v19.2s, v16.2s, v16.2s \n" // _val11 "smull v24.8h, v18.8b, v22.8b \n" "smull v25.8h, v18.8b, v23.8b \n" "zip1 v20.2s, v17.2s, v17.2s \n" // _val22 "smull v26.8h, v19.8b, v22.8b \n" "smull v27.8h, v19.8b, v23.8b \n" "zip2 v21.2s, v17.2s, v17.2s \n" // _val33 "sadalp v8.4s, v24.8h \n" "smull v28.8h, v20.8b, v22.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v20.8b, v23.8b \n" "sadalp 
v10.4s, v26.8h \n" "smull v30.8h, v21.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v21.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "6: \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "add v0.4s, v0.4s, v8.4s \n" "add v1.4s, v1.4s, v10.4s \n" "add v2.4s, v2.4s, v12.4s \n" "add v3.4s, v3.4s, v14.4s \n" "7: \n" "lsr w4, %w6, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 9f \n" "8: \n" "ld1 {v8.16b}, [%7], #16 \n" "ld1 {v9.16b}, [%8], #16 \n" "sshll v4.8h, v8.8b, #0 \n" "sshll2 v5.8h, v8.16b, #0 \n" "sshll v6.8h, v9.8b, #0 \n" "sshll2 v7.8h, v9.16b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "smlal2 v0.4s, v6.8h, v4.h[4] \n" "smlal2 v1.4s, v6.8h, v4.h[5] \n" "smlal2 v2.4s, v6.8h, v4.h[6] \n" "smlal2 v3.4s, v6.8h, v4.h[7] \n" "smlal v0.4s, v7.4h, v5.h[0] \n" "smlal v1.4s, v7.4h, v5.h[1] \n" "smlal v2.4s, v7.4h, v5.h[2] \n" "smlal v3.4s, v7.4h, v5.h[3] \n" "smlal2 v0.4s, v7.8h, v5.h[4] \n" "smlal2 v1.4s, v7.8h, v5.h[5] \n" "smlal2 v2.4s, v7.8h, v5.h[6] \n" "smlal2 v3.4s, v7.8h, v5.h[7] \n" "subs w4, w4, #1 \n" "bne 8b \n" "9: \n" "and w4, %w6, #3 \n" // w4 = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 11f \n" "10: \n" "ld1 {v4.8b}, [%7] \n" "ld1 {v6.8b}, [%8] \n" "sshll v4.8h, v4.8b, #0 \n" "sshll v6.8h, v6.8b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "add %7, %7, #4 \n" "add %8, %8, #4 \n" "subs w4, w4, #1 \n" "bne 10b \n" "11: \n" // transpose 4x4 "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "trn1 v6.4s, v2.4s, v3.4s \n" "trn2 v7.4s, v2.4s, v3.4s \n" "trn1 v0.2d, v4.2d, v6.2d \n" "trn2 v2.2d, v4.2d, v6.2d \n" "trn1 v1.2d, v5.2d, v7.2d \n" "trn2 v3.2d, v5.2d, v7.2d \n" "st1 {v0.4s}, [%0], #16 \n" 
"st1 {v1.4s}, [%1], #16 \n" "st1 {v2.4s}, [%2], #16 \n" "st1 {v3.4s}, [%3], #16 \n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn4), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn4), "6"(nn1), "7"(tmpptr), "8"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; #if __aarch64__ int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2); _sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } if (nn4 > 0) { int j = 0; for (; j + 1 < nn4; j += 2) { int8x16_t _val0123 = vld1q_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum00 = vdotq_laneq_s32(_sum00, _w0, _val0123, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0, _val0123, 1); int8x16_t _w1 = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w1, _val0123, 2); _sum10 = vdotq_laneq_s32(_sum10, _w1, _val0123, 3); tmpptr += 16; kptr0 += 32; } for (; j < 
nn4; j++) { int8x8_t _val01 = vld1_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum00 = vdotq_lane_s32(_sum00, _w0, _val01, 0); _sum10 = vdotq_lane_s32(_sum10, _w0, _val01, 1); tmpptr += 8; kptr0 += 16; } } #else // __ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = 
vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); _sum00 = vpaddq_s32(_s001, _s023); _sum10 = vpaddq_s32(_s101, _s123); } if (nn4 > 0) { int32x4_t _sum100 = vdupq_n_s32(0); int32x4_t _sum101 = vdupq_n_s32(0); int32x4_t _sum110 = vdupq_n_s32(0); int32x4_t _sum111 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn4; j += 2) { int8x16_t _val0123 = vld1q_s8(tmpptr); int32x4x2_t _val00221133 = vzipq_s32(vreinterpretq_s32_s8(_val0123), vreinterpretq_s32_s8(_val0123)); int8x8_t _val00 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[0])); int8x8_t _val11 = vreinterpret_s8_s32(vget_high_s32(_val00221133.val[0])); int8x8_t _val22 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[1])); int8x8_t _val33 = 
vreinterpret_s8_s32(vget_high_s32(_val00221133.val[1])); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01)); int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01)); _wv00 = vmlal_s8(_wv00, _val22, vget_low_s8(_w23)); _wv01 = vmlal_s8(_wv01, _val22, vget_high_s8(_w23)); _wv10 = vmlal_s8(_wv10, _val33, vget_low_s8(_w23)); _wv11 = vmlal_s8(_wv11, _val33, vget_high_s8(_w23)); _sum100 = vpadalq_s16(_sum100, _wv00); _sum101 = vpadalq_s16(_sum101, _wv01); _sum110 = vpadalq_s16(_sum110, _wv10); _sum111 = vpadalq_s16(_sum111, _wv11); tmpptr += 16; kptr0 += 32; } for (; j < nn4; j++) { int8x8_t _val01 = vld1_s8(tmpptr); int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), vreinterpret_s32_s8(_val01)); int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]); int8x8_t _val11 = vreinterpret_s8_s32(_val0011.val[1]); int8x16_t _w01 = vld1q_s8(kptr0); int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01)); int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01)); _sum100 = vpadalq_s16(_sum100, _wv00); _sum101 = vpadalq_s16(_sum101, _wv01); _sum110 = vpadalq_s16(_sum110, _wv10); _sum111 = vpadalq_s16(_sum111, _wv11); tmpptr += 8; kptr0 += 16; } int32x4_t _s001 = vpaddq_s32(_sum100, _sum101); int32x4_t _s101 = vpaddq_s32(_sum110, _sum111); _sum00 = vaddq_s32(_sum00, _s001); _sum10 = vaddq_s32(_sum10, _s101); } #endif // __ARM_FEATURE_DOTPROD int j = 0; for (; j + 3 < nn1; j += 4) { int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr)); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = 
vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0); _sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1); _sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2); _sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3); _sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4); _sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5); _sum00 = vmlal_laneq_s16(_sum00, _wcdef, _val01234567, 6); _sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7); tmpptr += 8; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum00 = vmlal_s16(_sum00, _val0, _w0123); _sum10 = vmlal_s16(_sum10, _val1, _w0123); tmpptr += 2; kptr0 += 4; } vst1q_lane_s32(outptr0, _sum00, 0); vst1q_lane_s32(outptr1, _sum00, 1); vst1q_lane_s32(outptr2, _sum00, 2); vst1q_lane_s32(outptr3, _sum00, 3); vst1q_lane_s32(outptr0 + 1, _sum10, 0); vst1q_lane_s32(outptr1 + 1, _sum10, 1); vst1q_lane_s32(outptr2 + 1, _sum10, 2); vst1q_lane_s32(outptr3 + 1, _sum10, 3); outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; #else // __aarch64__ asm volatile( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "cmp %4, #0 \n" "beq 3f \n" "pld [%7, #256] \n" "lsr r4, %4, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %8, #16 \n" "pld [%8, #128] \n" "mov r6, #32 \n" "pld [%8, #384] \n" "vld1.s8 {d20-d21}, [%8 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%7 :128]! 
\n" // _val0 _val1 "vld1.s8 {d22-d23}, [%8 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%7, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%8, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%7 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%8 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%7 :128]! \n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%8 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%7, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%8, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %7, %7, #32 \n" "sub %8, %8, #64 \n" "1: \n" "and r4, %4, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%7 :128]! \n" // _val "vld1.s8 {d20-d21}, [%8 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%8 :128]! 
\n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "3: \n" "cmp %5, #0 \n" "beq 7f \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "lsr r4, %5, #1 \n" // r4 = nn4 >> 1 "cmp r4, #0 \n" "beq 5f \n" "4: \n" "vld1.s8 {d16-d17}, [%7]! \n" // _val0123 "vld1.s8 {d20-d23}, [%8]! \n" // _w01 _w23 "vmov.s8 q9, q8 \n" "vtrn.s32 q8, q9 \n" // _val00 _val22 _val11 _val33 "vmull.s8 q12, d16, d20 \n" "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d18, d20 \n" "vmull.s8 q15, d18, d21 \n" "vmlal.s8 q12, d17, d22 \n" "vmlal.s8 q13, d17, d23 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q4, q14 \n" "vpadal.s16 q5, q15 \n" "subs r4, r4, #1 \n" "bne 4b \n" "5: \n" "and r4, %5, #1 \n" // r4 = nn4 & 1 "cmp r4, #0 \n" // r4 > 0 "beq 6f \n" "vld1.s8 {d16}, [%7]! \n" // _val01 "vld1.s8 {d18-d19}, [%8]! 
\n" // _w01 "vmov.s8 d17, d16 \n" "vtrn.s32 d16, d17 \n" // _val00 _val11 "vmull.s8 q12, d16, d18 \n" "vmull.s8 q13, d16, d19 \n" "vmull.s8 q14, d17, d18 \n" "vmull.s8 q15, d17, d19 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q4, q14 \n" "vpadal.s16 q5, q15 \n" "6: \n" "vpadd.s32 d16, d4, d5 \n" "vpadd.s32 d17, d6, d7 \n" "vpadd.s32 d18, d8, d9 \n" "vpadd.s32 d19, d10, d11 \n" "vadd.s32 q0, q0, q8 \n" "vadd.s32 q1, q1, q9 \n" "7: \n" "lsr r4, %6, #2 \n" // r4 = nn1 >> 2 "cmp r4, #0 \n" "beq 9f \n" "8: \n" "vld1.s8 {d4}, [%7]! \n" "vmovl.s8 q2, d4 \n" "vld1.s8 {d10-d11}, [%8]! \n" "vmovl.s8 q3, d10 \n" "vmovl.s8 q4, d11 \n" "vmlal.s16 q0, d6, d4[0] \n" "vmlal.s16 q1, d6, d4[1] \n" "vmlal.s16 q0, d7, d4[2] \n" "vmlal.s16 q1, d7, d4[3] \n" "vmlal.s16 q0, d8, d5[0] \n" "vmlal.s16 q1, d8, d5[1] \n" "vmlal.s16 q0, d9, d5[2] \n" "vmlal.s16 q1, d9, d5[3] \n" "subs r4, r4, #1 \n" "bne 8b \n" "9: \n" "and r4, %6, #3 \n" // r4 = nn1 & 3 "cmp r4, #0 \n" // w4 > 0 "beq 11f \n" "10: \n" "vld1.s8 {d4[]}, [%7]! \n" "vld1.s8 {d6[]}, [%7]! \n" "vmovl.s8 q2, d4 \n" "vmovl.s8 q3, d6 \n" "vld1.s8 {d8}, [%8] \n" "vmovl.s8 q4, d8 \n" "vmlal.s16 q0, d4, d8 \n" "vmlal.s16 q1, d6, d8 \n" "add %8, %8, #4 \n" "subs r4, r4, #1 \n" "bne 10b \n" "11: \n" "vst1.s32 {d0[0]}, [%0]! \n" "vst1.s32 {d0[1]}, [%1]! \n" "vst1.s32 {d1[0]}, [%2]! \n" "vst1.s32 {d1[1]}, [%3]! \n" "vst1.s32 {d2[0]}, [%0]! \n" "vst1.s32 {d2[1]}, [%1]! \n" "vst1.s32 {d3[0]}, [%2]! \n" "vst1.s32 {d3[1]}, [%3]! 
\n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn4), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn4), "6"(nn1), "7"(tmpptr), "8"(kptr0) : "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } if (nn4 > 0) { int j = 0; for (; j + 1 < nn4; j += 2) { int8x8_t _val01 = vld1_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0, _val01, 0); int8x16_t _w1 = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w1, _val01, 1); tmpptr += 8; kptr0 += 32; } for (; j < nn4; j++) { int8x8_t _val_xxx = vld1_s8(tmpptr); int8x16_t _w0 = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0, _val_xxx, 0); tmpptr += 4; kptr0 += 16; } } #else // __ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), 
vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); _sum0 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); _sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif } if (nn4 > 0) { int32x4_t _sum10 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn4; j += 2) { int8x8_t _val01 = vld1_s8(tmpptr); int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), 
vreinterpret_s32_s8(_val01)); int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]); int8x8_t _val11 = vreinterpret_s8_s32(_val0011.val[1]); int8x16_t _w0 = vld1q_s8(kptr0); int8x16_t _w1 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val00, vget_low_s8(_w0)); int16x8_t _wv1 = vmull_s8(_val00, vget_high_s8(_w0)); _wv0 = vmlal_s8(_wv0, _val11, vget_low_s8(_w1)); _wv1 = vmlal_s8(_wv1, _val11, vget_high_s8(_w1)); _sum10 = vpadalq_s16(_sum10, _wv0); _sum11 = vpadalq_s16(_sum11, _wv1); tmpptr += 8; kptr0 += 32; } for (; j < nn4; j++) { int8x8_t _val_xxx = vld1_s8(tmpptr); int8x8_t _val_val = vreinterpret_s8_s32(vzip_s32(vreinterpret_s32_s8(_val_xxx), vreinterpret_s32_s8(_val_xxx)).val[0]); int8x16_t _w0 = vld1q_s8(kptr0); int16x8_t _wv0 = vmull_s8(_val_val, vget_low_s8(_w0)); int16x8_t _wv1 = vmull_s8(_val_val, vget_high_s8(_w0)); _sum10 = vpadalq_s16(_sum10, _wv0); _sum11 = vpadalq_s16(_sum11, _wv1); tmpptr += 4; kptr0 += 16; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum10, _sum11); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum10), vget_high_s32(_sum10)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum11), vget_high_s32(_sum11)); int32x4_t _s01 = vcombine_s32(_s01_low, _s01_high); #endif _sum0 = vaddq_s32(_sum0, _s01); } #endif // __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 3 < nn1; j += 4) { int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr))); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0); _sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1); _sum0 = vmlal_lane_s16(_sum0, _w89ab, _val0123, 2); _sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3); tmpptr += 4; kptr0 += 16; } for (; j < nn1; j++) 
{ int16x4_t _val = vdup_n_s16(tmpptr[0]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val, _w0123); tmpptr += 1; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; } } remain_outch_start += nn_outch << 2; #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __ARM_NEON #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32); int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48); int8x16_t _val0123_h = vld1q_s8(tmpptr + 64); int8x16_t _val4567_h = vld1q_s8(tmpptr + 80); int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96); int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0); _sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0); _sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1); _sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1); _sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1); _sum3 = 
vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1); tmpptr += 128; kptr0 += 8; } if (nn4 > 0) { int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn4; j++) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _val2 = vld1q_s8(tmpptr + 32); int8x16_t _val3 = vld1q_s8(tmpptr + 48); int8x8_t _w_0123_xxxx = vld1_s8(kptr0); _sum4 = vdotq_lane_s32(_sum4, _val0, _w_0123_xxxx, 0); _sum5 = vdotq_lane_s32(_sum5, _val1, _w_0123_xxxx, 0); _sum6 = vdotq_lane_s32(_sum6, _val2, _w_0123_xxxx, 0); _sum7 = vdotq_lane_s32(_sum7, _val3, _w_0123_xxxx, 0); tmpptr += 64; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum4); _sum1 = vaddq_s32(_sum1, _sum5); _sum2 = vaddq_s32(_sum2, _sum6); _sum3 = vaddq_s32(_sum3, _sum7); } int j = 0; for (; j < nn1; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); if (nn > 0) { int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); 
int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1); tmpptr += 64; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); } if (nn4 > 0) { int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn4; j++) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x8_t _w_0123_xxxx = vld1_s8(kptr0); _sum2 = vdotq_lane_s32(_sum2, _val0, _w_0123_xxxx, 0); _sum3 = vdotq_lane_s32(_sum3, _val1, _w_0123_xxxx, 0); tmpptr += 32; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); } int j = 0; for (; j < nn1; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s)); tmpptr += 8; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); outptr0 += 8; } #endif // __ARM_FEATURE_DOTPROD for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1); tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); #else // __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = 
vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _val2 = vld1q_s8(tmpptr + 32); int8x16_t _val3 = vld1q_s8(tmpptr + 48); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w)); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w)); _s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w)); _s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 64; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); 
tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); _sum4 = vaddq_s32(_sum4, _sum5); _sum6 = vaddq_s32(_sum6, _sum7); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4)); int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6)); int32x2_t _ss0 = vpadd_s32(_s0, _s2); int32x2_t _ss1 = vpadd_s32(_s4, _s6); _sum0 = vcombine_s32(_ss0, _ss1); #endif // __ARM_FEATURE_DOTPROD } int sum0123[4] = {0, 0, 0, 0}; if (nn4 > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j < nn4; j++) { int8x16_t _val0123_lh = vld1q_s8(tmpptr); int8x8_t _w_lh_xx = vld1_s8(kptr0); _sum1 = vdotq_lane_s32(_sum1, _val0123_lh, _w_lh_xx, 0); tmpptr += 16; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum1); #else // __ARM_FEATURE_DOTPROD int j = 0; for (; j < nn4; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char val4 = tmpptr[4]; signed char val5 = tmpptr[5]; signed char val6 = tmpptr[6]; signed char val7 = tmpptr[7]; signed char val8 = tmpptr[8]; signed char val9 = tmpptr[9]; signed char val10 = tmpptr[10]; signed char val11 = tmpptr[11]; signed char val12 = tmpptr[12]; signed char val13 = tmpptr[13]; signed char val14 = tmpptr[14]; signed char val15 = tmpptr[15]; signed char w0 = kptr0[0]; signed char w1 = kptr0[1]; signed char w2 = kptr0[2]; signed char w3 = kptr0[3]; sum0123[0] += val0 * w0; sum0123[0] += val1 * w1; sum0123[0] += val2 * w2; sum0123[0] += val3 * w3; sum0123[1] += val4 * w0; sum0123[1] += val5 * w1; sum0123[1] += val6 * w2; sum0123[1] += val7 * w3; sum0123[2] += val8 * w0; sum0123[2] += val9 * w1; sum0123[2] += val10 * w2; sum0123[2] += val11 * w3; sum0123[3] += val12 * w0; sum0123[3] += val13 * w1; sum0123[3] += val14 * w2; sum0123[3] += val15 * w3; tmpptr += 
16; kptr0 += 4; } #endif // __ARM_FEATURE_DOTPROD } int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char w = kptr0[0]; sum0123[0] += val0 * w; sum0123[1] += val1 * w; sum0123[2] += val2 * w; sum0123[3] += val3 * w; tmpptr += 4; kptr0 += 1; } _sum0 = vaddq_s32(_sum0, vld1q_s32(sum0123)); vst1q_s32(outptr0, _sum0); outptr0 += 4; } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int32x2_t _sum = vdup_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x2_t _sum0 = vdup_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val01_lh = vld1q_s8(tmpptr); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0); _sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1); tmpptr += 16; kptr0 += 8; } _sum = vadd_s32(_sum0, _sum1); #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, 
vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 32; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); _sum = vpadd_s32(_s0, _s2); #endif // __ARM_FEATURE_DOTPROD } int sum01[2] = {0, 0}; if (nn4 > 0) { int j = 0; for (; j < nn4; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char val4 = tmpptr[4]; signed char val5 = tmpptr[5]; signed char val6 = tmpptr[6]; signed char val7 = tmpptr[7]; signed char w0 = kptr0[0]; signed char w1 = kptr0[1]; signed char w2 = kptr0[2]; signed char w3 = kptr0[3]; sum01[0] += val0 * w0; sum01[0] += val1 * w1; sum01[0] += val2 * w2; sum01[0] += val3 * w3; sum01[1] += val4 * w0; sum01[1] += val5 * w1; sum01[1] += val6 * w2; sum01[1] += val7 * w3; tmpptr += 8; kptr0 += 4; } } int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char w = kptr0[0]; sum01[0] += val0 * w; sum01[1] += val1 * w; tmpptr += 2; kptr0 += 1; } _sum = vadd_s32(_sum, vld1_s32(sum01)); vst1_s32(outptr0, _sum); outptr0 += 2; } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = 
tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn4 = ((inch % 8) / 4) * maxk; int nn1 = (inch % 4) * maxk; int sum = 0; if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); _sum0 = vdotq_s32(_sum0, _val, _w); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); _sum1 = vdot_s32(_sum1, _val, _w); tmpptr += 8; kptr0 += 8; } sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1); #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w)); _s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s8 = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 8; kptr0 += 8; } int32x4_t _sum = vaddq_s32(_sum0, _sum1); #if __aarch64__ sum = vaddvq_s32(_sum); // dot #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); sum = vget_lane_s32(_ss, 0); #endif #endif // __ARM_FEATURE_DOTPROD } if (nn4 > 0) { int j = 0; for (; j < nn4; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char w0 = kptr0[0]; signed char w1 = kptr0[1]; signed char w2 = kptr0[2]; signed char w3 = kptr0[3]; sum += val0 * w0; sum += val1 * w1; sum += val2 * w2; sum += val3 * w3; tmpptr += 4; kptr0 += 4; } } int j = 
0;
            // scalar tail: remaining single input channels, one MAC per step
            for (; j < nn1; j++)
            {
                signed char val = tmpptr[0];
                signed char w = kptr0[0];
                sum += val * w;
                tmpptr += 1;
                kptr0 += 1;
            }

            outptr0[0] = sum;
            outptr0 += 1;
        }
#else  // __ARM_NEON
        // Plain C fallback: one output position at a time, scalar multiply-accumulate.
        for (; i < size; i++)
        {
            const signed char* tmpptr = tmp.channel(i);
            const signed char* kptr0 = kernel.channel(p);

            int nn1 = inch * maxk;

            int sum = 0;
            int j = 0;
            for (; j < nn1; j++)
            {
                signed char val = tmpptr[0];
                signed char w = kptr0[0];
                sum += val * w;
                tmpptr += 1;
                kptr0 += 1;
            }

            outptr0[0] = sum;
            outptr0 += 1;
        }
#endif // __ARM_NEON
    }
}

// Repack convolution weights into the interleaved layout consumed by the
// int8 sgemm kernels above.
// src layout: maxk-inch-outch
// dst layout: 8a-4b-maxk-inch/8a-outch/4b (extra 4a-4b-2 split on arm82 dotprod).
// NOTE(review): the element order written here is contracted to the
// NEON/asm consumers — do not reorder the copy loops.
static void convolution_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
#if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD
    // Runtime dispatch: delegate to the asimddp-specialized repack when the
    // CPU supports the dot-product extension.
    if (ncnn::cpu_support_arm_asimddp())
    {
        convolution_im2col_sgemm_transform_kernel_int8_neon_asimddp(_kernel, kernel_tm, inch, outch, kernel_w, kernel_h);
        return;
    }
#endif

    const int maxk = kernel_w * kernel_h;

#if __ARM_NEON
    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82)
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    // destination width/height depend on how many 8- and 4-channel groups fit
    if (outch >= 4)
    {
        if (inch >= 8)
            kernel_tm.create(32 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);
        else if (inch >= 4)
            kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);
        else
            kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, (size_t)1u);
    }
    else
    {
        if (inch >= 8)
            kernel_tm.create(8 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, outch, (size_t)1u);
        else if (inch >= 4)
            kernel_tm.create(4 * maxk, inch / 4 + inch % 4, outch, (size_t)1u);
        else
            kernel_tm.create(1 * maxk, inch, outch, (size_t)1u);
    }

    int q = 0;
    // 4 output channels at a time
    for (; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        int p = 0;
        // 8 input channels at a time
        for (; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
#if __ARM_FEATURE_DOTPROD
                // dotprod kernels consume the 8 input channels as two 4x4 tiles
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 4; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#else
                // non-dotprod kernels read 8 consecutive input channels per output channel
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#endif
            }
        }
        // 4 input channels at a time
        for (; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
        // remaining input channels one at a time
        for (; p < inch; p++)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    // TODO unroll 2
    // remaining output channels, one at a time
    for (; q < outch; q++)
    {
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);

        int p = 0;
        for (; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
        for (; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 4; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
        for (; p < inch; p++)
        {
            for (int k = 0; k < maxk; k++)
            {
                const signed char* k00 = kernel.channel(q).row<const signed char>(p);
                g00[0] = k00[k];
                g00++;
            }
        }
    }
#else  // __ARM_NEON
    // scalar fallback kernels read the weights in their natural order — no repack
    kernel_tm = _kernel.reshape(maxk, inch, outch);
#endif // __ARM_NEON
}

// im2col + sgemm int8 convolution driver:
// unrolls the input into bottom_im2col (size x maxk x inch), then invokes
// the packed sgemm kernel above to fill top_blob.
static void convolution_im2col_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
    {
        // elements to skip from the end of one output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // top-left sample of this kernel tap, offset by dilation
                    const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        // copy 4 output columns per iteration
                        for (; j + 3 < outw; j += 4)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];
                            ptr[2] = sptr[stride_w * 2];
                            ptr[3] = sptr[stride_w * 3];

                            sptr += stride_w * 4;
                            ptr += 4;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];

                            sptr += stride_w * 2;
                            ptr += 2;
                        }
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
soma_clustering.h
// -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
//
// This model exemplifies the use of extracellular diffusion and shows
// how to extend the default "Cell". In step 0 one can see how an extra
// data member is added and can be accessed throughout the simulation with
// its Get and Set methods. N cells are randomly positioned in space, of which
// half are of type 1 and half of type -1. Each type secretes a different
// substance. Cells move towards the gradient of their own substance, which
// results in clusters being formed of cells of the same type.
//
#ifndef DEMO_SOMA_CLUSTERING_H_
#define DEMO_SOMA_CLUSTERING_H_

#include <vector>

#include "biodynamo.h"
#include "my_cell.h"
#include "validation_criterion.h"

namespace bdm {
namespace soma_clustering {

// Ids of the two extracellular substances; each cell type secretes one.
enum Substances { kSubstance0, kSubstance1 };

// Runs the soma-clustering demo: two cell populations (type +1 / type -1),
// each secreting and chemotaxing along its own substance, self-sort into
// same-type clusters. Returns 0 when the clustering criterion is met,
// non-zero otherwise (suitable as a process exit code).
inline int Simulate(int argc, const char** argv) {
  auto set_param = [](Param* param) {
    // Create an artificial bound for the simulation space
    param->bound_space = Param::BoundSpaceMode::kClosed;
    param->min_bound = 0;
    param->max_bound = 250;
    // Cells only follow chemical gradients here, so the physics pass is
    // not needed.
    param->unschedule_default_operations = {"mechanical forces"};
  };

  Simulation simulation(argc, argv, set_param);

  // Define initial model
  auto* param = simulation.GetParam();
  int num_cells = 20000;

  // NOTE(review): presumably GetRandom() returns a thread-local generator,
  // so seeding inside a parallel region seeds every thread's RNG — confirm
  // against the BioDynaMo documentation.
#pragma omp parallel
  simulation.GetRandom()->SetSeed(4357);

  // Define the substances that cells may secrete
  // Order: substance_name, diffusion_coefficient, decay_constant, resolution
  ModelInitializer::DefineSubstance(kSubstance0, "Substance_0", 0.5, 0.1, 20);
  ModelInitializer::DefineSubstance(kSubstance1, "Substance_1", 0.5, 0.1, 20);

  // `construct` captures cell_type / substance_name by reference so the
  // same lambda builds both populations after they are reassigned below.
  int cell_type = 1;
  std::string substance_name = "Substance_0";
  auto construct = [&cell_type, &substance_name](const Double3& position) {
    auto* cell = new MyCell(position, cell_type);
    cell->SetDiameter(10);
    cell->AddBehavior(new Secretion(substance_name));
    cell->AddBehavior(new Chemotaxis(substance_name, 5));
    return cell;
  };

  // Construct num_cells/2 cells of type 1
  ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
                                       num_cells / 2, construct);

  // Construct num_cells/2 cells of type -1
  cell_type = -1;
  substance_name = "Substance_1";
  ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
                                       num_cells / 2, construct);

  // Run simulation for N timesteps
  simulation.GetScheduler()->Simulate(1000);

  // Check if criterion is met
  double spatial_range = 5;
  auto crit = GetCriterion(spatial_range, num_cells / 8);
  if (crit) {
    std::cout << "Simulation completed successfully!\n";
  }
  return !crit;
}

}  // namespace soma_clustering
}  // namespace bdm

#endif  // DEMO_SOMA_CLUSTERING_H_
singlenode_intersectreduce.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SINGLENODE_INTERSECTREDUCE_H_ #define SRC_SINGLENODE_INTERSECTREDUCE_H_ #include <algorithm> #include "src/bitvector.h" #ifdef INTERSECT_MKL #error template <typename Ta, typename Tb, typename Tc> void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb, int *ib, Tc **c, int **jc, int **ic, Tc (*op_fp)(Ta, Tb)) { int nnzc = std::max(ia[m] - 1, ib[m] - 1); int nzmax = nnzc; (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64)); (*c) = reinterpret_cast<Tc *>( _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(Tc), 64)); (*jc) = reinterpret_cast<int *>( _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(int), 64)); // Add new_c = c + tc int cnz_cnt = 0; for (int row = 0; row < m; row++) { (*ic)[row] = cnz_cnt + 1; // Merge c row and tc row into new_c row int Astart = ia[row]; int Aend = ia[row + 1]; int Bstart = ib[row]; int Bend = ib[row + 1]; while ((Astart < Aend) || (Bstart < Bend)) { int Acol = (Astart != Aend) ? ja[Astart - 1] : INT_MAX; int Bcol = (Bstart != Bend) ? 
jb[Bstart - 1] : INT_MAX; if (Acol < Bcol) { Astart++; } else if (Bcol < Acol) { Bstart++; } else { (*c)[cnz_cnt] = op_fp(a[Astart - 1], b[Bstart - 1], vsp); (*jc)[cnz_cnt] = Acol; cnz_cnt++; Astart++; Bstart++; } } } (*ic)[m] = cnz_cnt + 1; } #endif #ifdef INTERSECT_NAIVE_MERGE template <typename Ta, typename Tb, typename Tc> void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb, int *ib, Tc **c, int **jc, int **ic, Tc (*op_fp)(Ta, Tb)) { #ifndef SORTED #error Merge kernels require sorted inputs #endif int nnzc = std::max(ia[m] - 1, ib[m] - 1); int nzmax = nnzc; (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64)); (*c) = reinterpret_cast<Tc *>( _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(Tc), 64)); (*jc) = reinterpret_cast<int *>( _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(int), 64)); // Add new_c = c + tc int cnz_cnt = 0; for (int row = 0; row < m; row++) { (*ic)[row] = cnz_cnt + 1; // Merge c row and tc row into new_c row int Astart = ia[row]; int Aend = ia[row + 1]; int Bstart = ib[row]; int Bend = ib[row + 1]; while ((Astart < Aend) || (Bstart < Bend)) { int Acol = (Astart != Aend) ? ja[Astart - 1] : INT_MAX; int Bcol = (Bstart != Bend) ? 
jb[Bstart - 1] : INT_MAX; if (Acol < Bcol) { Astart++; } else if (Bcol < Acol) { Bstart++; } else { (*c)[cnz_cnt] = op_fp(a[Astart - 1], b[Bstart - 1], vsp); (*jc)[cnz_cnt] = Acol; cnz_cnt++; Astart++; Bstart++; } } } (*ic)[m] = cnz_cnt + 1; } #endif #ifdef INTERSECT_PARALLEL_MERGE template <typename Ta, typename Tb, typename Tc> void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb, int *ib, Tc **c, int **jc, int **ic, void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp) { #ifndef SORTED #error Merge kernels require sorted inputs #endif int num_threads = omp_get_max_threads(); assert(num_threads <= omp_get_max_threads()); (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64)); int nchunks = num_threads; int chunksize = (m + nchunks - 1) / nchunks; int *nnzs = reinterpret_cast<int *>(_mm_malloc((nchunks + 1) * sizeof(int), 64)); memset(nnzs, 0, num_threads * sizeof(int)); Tc **c_t = new Tc *[nchunks]; int **jc_t = new int *[nchunks]; #pragma omp parallel num_threads(num_threads) { int tid = omp_get_thread_num(); #pragma omp for schedule(dynamic) for (int chunk = 0; chunk < nchunks; chunk++) { int start_row = chunk * chunksize; int end_row = (chunk + 1) * chunksize; if (end_row > m) end_row = m; // Determine number of nonzeros int nnzA = ia[end_row] - ia[start_row]; int nnzB = ib[end_row] - ib[start_row]; int nnzmax = std::max(nnzA, nnzB); // Allocate space for nonzeros c_t[chunk] = reinterpret_cast<Tc *>( _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(Tc), 64)); jc_t[chunk] = reinterpret_cast<int *>( _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(int), 64)); int cnz_cnt = 0; for (int row = start_row; row < end_row; row++) { (*ic)[row] = cnz_cnt + 1; // Merge c row and tc row into new_c row int Astart = ia[row]; int Aend = ia[row + 1]; int Bstart = ib[row]; int Bend = ib[row + 1]; while ((Astart < Aend) || (Bstart < Bend)) { int Acol = (Astart != Aend) ? ja[Astart - 1] : INT_MAX; int Bcol = (Bstart != Bend) ? 
jb[Bstart - 1] : INT_MAX; if (Acol < Bcol) { Astart++; } else if (Bcol < Acol) { Bstart++; } else { op_fp(a[Astart - 1], b[Bstart - 1], &(c_t[chunk][cnz_cnt]), vsp); jc_t[chunk][cnz_cnt] = Acol; cnz_cnt++; Astart++; Bstart++; } } } nnzs[chunk] = cnz_cnt; } // for each chunk } // pragma omp parallel // Main thread allocates a large result array int nnzc = 0; for (int chunk = 0; chunk < nchunks; chunk++) { int tmp = nnzs[chunk]; nnzs[chunk] = nnzc; nnzc += tmp; } nnzs[nchunks] = nnzc; (*c) = reinterpret_cast<Tc *>( _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64)); (*jc) = reinterpret_cast<int *>( _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64)); #pragma omp parallel num_threads(num_threads) { int tid = omp_get_thread_num(); #pragma omp for schedule(dynamic) for (int chunk = 0; chunk < nchunks; chunk++) { int start_row = chunk * chunksize; int end_row = (chunk + 1) * chunksize; if (end_row > m) end_row = m; #pragma simd for (int Arow = start_row; Arow < end_row; Arow++) { (*ic)[Arow] += nnzs[chunk]; } memcpy((*c) + nnzs[chunk], c_t[chunk], (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(Tc)); memcpy((*jc) + nnzs[chunk], jc_t[chunk], (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(int)); _mm_free(c_t[chunk]); _mm_free(jc_t[chunk]); } } // pragma omp parallel (*ic)[m] = nnzs[nchunks] + 1; delete c_t; delete jc_t; _mm_free(nnzs); } #endif #ifdef INTERSECT_NAIVE_SPA bool cmp_int_intersect_naive(int i1, int i2) { return i1 < i2; } template <typename Ta, typename Tb, typename Tc> void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb, int *ib, Tc **c, int **jc, int **ic, Tc (*op_fp)(Ta, Tb)) { Tc *Crow = reinterpret_cast<Tc *>(_mm_malloc(n * sizeof(Tc), 64)); int *Cidxs = reinterpret_cast<int *>(_mm_malloc(n * sizeof(int), 64)); bool *Cflags = reinterpret_cast<bool *>(_mm_malloc(n * sizeof(bool), 64)); memset(Crow, 0, n * sizeof(Tc)); memset(Cflags, 0, n * sizeof(bool)); int nnzA = ia[m] - 1; int nnzB = ib[m] - 1; int nnzmax = std::max(nnzA, nnzB); 
(*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64)); (*c) = reinterpret_cast<Tc *>( _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(Tc), 64)); (*jc) = reinterpret_cast<int *>( _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(int), 64)); int cint_cnt = 0; for (int Arow = 0; Arow < m; Arow++) { int c_row_int_start = cint_cnt; int Arow_nnz = 0; (*ic)[Arow] = cint_cnt + 1; for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) { int Acol = ja[Anz_id - 1]; Cidxs[Arow_nnz] = Acol - 1; Cflags[Acol - 1] = true; Crow[Acol - 1] = a[Anz_id - 1]; Arow_nnz++; } for (int Bnz_id = ib[Arow]; Bnz_id < ib[Arow + 1]; Bnz_id++) { int Bcol = jb[Bnz_id - 1]; if (Cflags[Bcol - 1]) { (*jc)[cint_cnt] = Bcol; cint_cnt++; Crow[Bcol - 1] = op_fp(Crow[Bcol - 1], b[Bnz_id - 1], vsp); } } #ifdef SORTED std::sort((*jc) + c_row_int_start, (*jc) + cint_cnt, cmp_int_intersect_naive); #endif for (int Cnz_id = 0; Cnz_id < Arow_nnz; Cnz_id++) { Cflags[Cidxs[Cnz_id]] = 0; } for (int Cnz_id = c_row_int_start; Cnz_id < cint_cnt; Cnz_id++) { int Ccol = (*jc)[Cnz_id]; (*c)[Cnz_id] = Crow[Ccol - 1]; } } (*ic)[m] = cint_cnt + 1; _mm_free(Cidxs); _mm_free(Crow); _mm_free(Cflags); } #endif #ifdef INTERSECT_PARALLEL_SPA bool cmp_int_intersect_parallel(int i1, int i2) { return i1 < i2; } template <typename Ta, typename Tb, typename Tc> void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb, int *ib, Tc **c, int **jc, int **ic, void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp) { int num_threads = omp_get_max_threads(); assert(num_threads <= omp_get_max_threads()); (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64)); int nchunks = num_threads; int chunksize = (m + nchunks - 1) / nchunks; int *nnzs = reinterpret_cast<int *>(_mm_malloc((nchunks + 1) * sizeof(int), 64)); memset(nnzs, 0, num_threads * sizeof(int)); Tc **c_t = new Tc *[nchunks]; int **jc_t = new int *[nchunks]; Tc **Crow = new Tc *[num_threads]; int **Cidxs = new int *[num_threads]; bool 
**Cflags = new bool *[num_threads]; #pragma omp parallel num_threads(num_threads) { int tid = omp_get_thread_num(); Crow[tid] = reinterpret_cast<Tc *>(_mm_malloc(n * sizeof(Tc), 64)); Cidxs[tid] = reinterpret_cast<int *>(_mm_malloc(n * sizeof(Tc), 64)); Cflags[tid] = reinterpret_cast<bool *>(_mm_malloc(n * sizeof(bool), 64)); memset(Crow[tid], 0, n * sizeof(Tc)); memset(Cidxs[tid], 0, n * sizeof(int)); memset(Cflags[tid], 0, n * sizeof(bool)); #pragma omp for schedule(dynamic) for (int chunk = 0; chunk < nchunks; chunk++) { int start_row = chunk * chunksize; int end_row = (chunk + 1) * chunksize; if (end_row > m) end_row = m; // Determine number of nonzeros int nnzA = ia[end_row] - ia[start_row]; int nnzB = ib[end_row] - ib[start_row]; int nnzmax = std::max(nnzA, nnzB); // Allocate space for nonzeros c_t[chunk] = reinterpret_cast<Tc *>( _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(Tc), 64)); jc_t[chunk] = reinterpret_cast<int *>( _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(int), 64)); int cint_cnt = 0; for (int row = start_row; row < end_row; row++) { (*ic)[row] = cint_cnt + 1; int c_row_int_start = cint_cnt; int Arow_nnz = 0; for (int Anz_id = ia[row]; Anz_id < ia[row + 1]; Anz_id++) { int Acol = ja[Anz_id - 1]; Cidxs[tid][Arow_nnz] = Acol - 1; Cflags[tid][Acol - 1] = true; Crow[tid][Acol - 1] = a[Anz_id - 1]; Arow_nnz++; } for (int Bnz_id = ib[row]; Bnz_id < ib[row + 1]; Bnz_id++) { int Bcol = jb[Bnz_id - 1]; if (Cflags[tid][Bcol - 1]) { jc_t[chunk][cint_cnt] = Bcol; cint_cnt++; op_fp(Crow[tid][Bcol - 1], b[Bnz_id - 1], &(Crow[tid][Bcol-1]), vsp); } } #ifdef SORTED std::sort(jc_t[chunk] + c_row_int_start, jc_t[chunk] + cint_cnt, cmp_int_intersect_parallel); #endif for (int Cnz_id = 0; Cnz_id < Arow_nnz; Cnz_id++) { Cflags[tid][Cidxs[tid][Cnz_id]] = 0; } for (int Cnz_id = c_row_int_start; Cnz_id < cint_cnt; Cnz_id++) { int Ccol = jc_t[chunk][Cnz_id]; c_t[chunk][Cnz_id] = Crow[tid][Ccol - 1]; } } nnzs[chunk] = cint_cnt; } // for each chunk } // pragma omp 
parallel // Main thread allocates a large result array int nnzc = 0; for (int chunk = 0; chunk < nchunks; chunk++) { int tmp = nnzs[chunk]; nnzs[chunk] = nnzc; nnzc += tmp; } nnzs[nchunks] = nnzc; (*c) = reinterpret_cast<Tc *>( _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64)); (*jc) = reinterpret_cast<int *>( _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64)); #pragma omp parallel num_threads(num_threads) { int tid = omp_get_thread_num(); #pragma omp for schedule(dynamic) for (int chunk = 0; chunk < nchunks; chunk++) { int start_row = chunk * chunksize; int end_row = (chunk + 1) * chunksize; if (end_row > m) end_row = m; #pragma simd for (int Arow = start_row; Arow < end_row; Arow++) { (*ic)[Arow] += nnzs[chunk]; } memcpy((*c) + nnzs[chunk], c_t[chunk], (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(Tc)); memcpy((*jc) + nnzs[chunk], jc_t[chunk], (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(int)); _mm_free(c_t[chunk]); _mm_free(jc_t[chunk]); } } // pragma omp parallel (*ic)[m] = nnzs[nchunks] + 1; delete c_t; delete jc_t; _mm_free(nnzs); } #endif template <typename Ta, typename Tb, typename Tc> void intersect_dense_segment(Ta* v1, int * bv1, int * nnz, int num_ints, Tb * v2, int * bv2, Tc * v3, int * bv3, void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp) { #pragma omp parallel for for(int i = 0 ; i < num_ints ; i++) { bv3[i] = bv1[i] & bv2[i]; } int tmp_nnz = 0; #pragma omp parallel for reduction(+:tmp_nnz) for(int ii = 0 ; ii < num_ints ; ii++) { int cnt = _popcnt32(bv3[ii]); if(cnt == 0) continue; tmp_nnz += cnt; for(int i = ii*32 ; i < (ii+1)*32 ; i++) { if(get_bitvector(i, bv3)) { Ta tmp = v1[i]; op_fp(v1[i], v2[i], &(v3[i]), vsp); } } } *nnz = tmp_nnz; } #endif // SRC_SINGLENODE_INTERSECTREDUCE_H_
implied_vol_newton_ver3.c
// // implied_volatility_newton.c // // // Created by Domenico Natella on 10/25/16. // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <string.h> #include <mpi.h> #include <omp.h> #define SIZE 110 #define MAX_ITERATIONS 1000000 struct option{ double V_market[SIZE][2]; double K[SIZE]; double implied_vol[SIZE]; double T; double S; double r; }; struct tm create_tm(int year, int month, int day){ struct tm my_time = { .tm_year=year, .tm_mon=month, .tm_mday=day, .tm_hour=0, .tm_min=0, .tm_sec=0 }; return my_time; } struct option load(char* filename){ FILE* file = fopen(filename, "r"); struct option op; fscanf(file, "%lf", &op.S); char tmp[12],cp[2]; fscanf(file, "%s", tmp); char s[2] = "/"; char *token; token = strtok(tmp, s); int date[3]={0,0,0}; int i = 0; while( token != NULL ){ date[i] = atoi(token); token = strtok(NULL, s); i++; } time_t now; time(&now); struct tm option_t = create_tm(date[0]-1900, date[1]-1, date[2]); time_t opt_t_conv = mktime(&option_t); double diff_t = difftime(opt_t_conv, now); op.T = (diff_t/86400)/365.; i=0; while(fscanf(file, "%s", tmp)!=EOF){ if(strcmp(tmp, "c")==0 | strcmp(tmp, "p")==0) strcpy(cp,tmp); else{ op.K[i] = atof(strtok(tmp,s)); op.V_market[i][0] = atof(strtok(NULL,s)); if(strcmp(cp, "c")==0) op.V_market[i][1] = 0.; else if(strcmp(cp, "p")==0) op.V_market[i][1] = 1.; } i++; } op.r = 0.03; return op; } double pdf(const double x) { return (1.0/(pow(2*M_PI,0.5)))*exp(-0.5*x*x); } double cdf(double x){ double RT2PI = sqrt(4.0*acos(0.0)); static const double SPLIT = 7.07106781186547; static const double N0 = 220.206867912376; static const double N1 = 221.213596169931; static const double N2 = 112.079291497871; static const double N3 = 33.912866078383; static const double N4 = 6.37396220353165; static const double N5 = 0.700383064443688; static const double N6 = 3.52624965998911e-02; static const double M0 = 440.413735824752; static const double M1 = 793.826512519948; static const double M2 = 
637.333633378831; static const double M3 = 296.564248779674; static const double M4 = 86.7807322029461; static const double M5 = 16.064177579207; static const double M6 = 1.75566716318264; static const double M7 = 8.83883476483184e-02; const double z = fabs(x); double c = 0.0; if(z<=37.0){ const double e = exp(-z*z/2.0); if(z<SPLIT){ const double n = (((((N6*z + N5)*z + N4)*z + N3)*z + N2)*z + N1)*z + N0; const double d = ((((((M7*z + M6)*z + M5)*z + M4)*z + M3)*z + M2)*z + M1)*z + M0; c = e*n/d;} else{ const double f = z + 1.0/(z + 2.0/(z + 3.0/(z + 4.0/(z + 13.0/20.0)))); c = e/(RT2PI*f);} } return x<=0.0 ? c : 1-c; } double d_j(int j, double S, double K, double r, double sigma, double T){ double d1 = (log(S/K) + (r + 0.5*sigma*sigma)*T)/(sigma*(pow(T,0.5))); if(j==1) return d1; else return d1-sigma*pow(T,0.5); } double call_price(double S, double K, double r, double sigma, double T, double type){ if(type==0.) return S * cdf(d_j(1, S, K, r, sigma, T))-K*exp(-r*T) * cdf(d_j(2, S, K, r,sigma, T)); else return K*exp(-r*T) * cdf(d_j(2, S, K, r,sigma, T)) - S * cdf(d_j(1, S, K, r, sigma, T)) ; } double call_vega(const double S, const double K, const double r, const double sigma, const double T) { return S * sqrt(T) * pdf(d_j(1, S, K, r, sigma, T)); } double newton_raphson(double y_target, double init, double epsilon, double S, double K, double r, double T, double type){ double x = init; double y = call_price(S, K, r, x, T,type); int i=0; while (fabs(y-y_target) > epsilon) { if(i >= MAX_ITERATIONS) break; double d_x = call_vega(S, K, r, x, T); x += (y-y_target)/d_x; y = call_price(S,K,r,x,T,type); i++; } return fabs(x); } int main(int argc, char** argv){ // First we create the parameter list // S: Underlying spot price // K: Strike price // r: Risk-free rate (5%) // T: One year until expiry // C_M: Option market price int rank,size,i,j,k,len=7; double low_vol = 0.3, epsilon = 0.001; struct option op[len], toReturn[len]; int err = MPI_Init(&argc, &argv); 
MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size < 1) { fprintf(stderr, "World size must be greater than 1 for %s\n", argv[0]); MPI_Abort(MPI_COMM_WORLD, 1); } int blocklen[6] = {SIZE*2,SIZE,SIZE,1,1,1}; MPI_Datatype types[6] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE,MPI_DOUBLE,MPI_DOUBLE,MPI_DOUBLE}; MPI_Datatype mpi_op_type; MPI_Aint offsets[6]; offsets[0] = offsetof(struct option, V_market); offsets[1] = offsetof(struct option, K); offsets[2] = offsetof(struct option, implied_vol); offsets[3] = offsetof(struct option, T); offsets[4] = offsetof(struct option, S); offsets[5] = offsetof(struct option, r); MPI_Type_create_struct(6, blocklen, offsets, types, &mpi_op_type); MPI_Type_commit(&mpi_op_type); if (rank==0) { op[0] = load("./OPT_AAPL/Options_20161118.txt"); op[1] = load("./OPT_AAPL/Options_2017120.txt"); op[2] = load("./OPT_AAPL/Options_2017317.txt"); op[3] = load("./OPT_AAPL/Options_2017421.txt"); op[4] = load("./OPT_AAPL/Options_2017616.txt"); op[5] = load("./OPT_AAPL/Options_20171117.txt"); op[6] = load("./OPT_AAPL/Options_2018119.txt"); } MPI_Bcast(&op,len,mpi_op_type,0,MPI_COMM_WORLD); int elements_per_proc = len / size; int difference = len - elements_per_proc * size; int* chunk_sizes = (int*)(malloc(sizeof(int) * size)); int* displ = (int*)(malloc(sizeof(int) * size)); for(i = 0; i < size; ++i){ chunk_sizes[i] = elements_per_proc; displ[i] = i * elements_per_proc; } chunk_sizes[size - 1] += difference; int current_recv_size = elements_per_proc + (rank == size - 1 ? 
difference : 0); printf("Process %d has chunk_size:%d and displ:%d\n", rank, current_recv_size, displ[rank]); #pragma omp parallel for default(private) shared(low_vol, epsilon, op) schedule(guided) for(j=displ[rank]; j<displ[rank]+current_recv_size; j++){ for(i=0; i<14; i++) op[j].implied_vol[i] = newton_raphson(op[j].V_market[i][0], low_vol, epsilon, op[j].S, op[j].K[i], op[j].r, op[j].T, op[j].V_market[i][1]); } MPI_Send(&op[displ[rank]], current_recv_size, mpi_op_type, 0, 0, MPI_COMM_WORLD); if(rank == 0){ for (i=0; i<size; i++) { current_recv_size = elements_per_proc + (i == size - 1 ? difference : 0); MPI_Recv(&toReturn, current_recv_size , mpi_op_type, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); for (k=0; k<current_recv_size; k++) { for(j=0; j<14; j++) printf("Implied vol. for time %.2f is %.2f%% \n", (toReturn[k].T), toReturn[k].implied_vol[j]); } } fflush(stdout); } free(chunk_sizes); free(displ); MPI_Type_free(&mpi_op_type); MPI_Finalize(); return 0; }
array_out_bound_fix.c
//#include <omp.h> int main(){ int i = 0; int *p = &i; int a[2]; #pragma omp parallel for for(i=1; i<10; i++){ a[i] = a[*p + i + 1]; } }
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class Type; class Value; class OpenMPIRBuilder; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. 
/// Type-erasing wrapper around a callable that emits the body of an OpenMP
/// region. Stores the callable's address plus a trampoline that restores its
/// static type; an optional PrePostActionTy can be attached via setAction()
/// (consumed by the out-of-line operator()).
class RegionCodeGenTy final {
  intptr_t CodeGen;  // address of the wrapped callable (not owned)
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  CodeGenTy Callback;  // trampoline instantiated for the callable's type
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  // The enable_if keeps this constructor from hijacking the copy
  // constructor. NOTE: only the *address* of the callable is stored, so it
  // must outlive this wrapper.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};

/// Collected clause data needed to emit an OpenMP task: private /
/// firstprivate / lastprivate variables with their copies and initializers,
/// reduction items, dependences, and scheduling attributes.
struct OMPTaskDataTy final {
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  // One `depend` clause entry; DepExprs lists its dependence items.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // PointerIntPair packs the emitted clause value with a boolean flag.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  bool Tied
= true; bool Nogroup = false; bool IsReductionWithTaskMod = false; bool IsWorksharingReduction = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the item shared between tasks to reduce into. const Expr *Shared = nullptr; /// Reference to the original item. const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) { } }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// List of addresses of original variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. /// \param N Number of reduction item in the common list. /// \param PrivateAddr Address of the corresponding private item. /// \param SharedAddr Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. 
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for the shared and original reduction item. /// \param N Number of the reduction item. void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedAddr Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. 
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; /// Return the type of the private item. QualType getPrivateType(unsigned N) const { return cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl()) ->getType(); } }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Manages list of nontemporal decls for the specified directive. 
class UntiedTaskLocalDeclsRAII {
  CodeGenModule &CGM;
  const bool NeedToPush;

public:
  UntiedTaskLocalDeclsRAII(
      CodeGenFunction &CGF,
      const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
                            std::pair<Address, Address>> &LocalVars);
  ~UntiedTaskLocalDeclsRAII();
};

/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we also need to
/// store the private copy in the shared address.
/// Also stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
  llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
      DeclToUniqueName;
  LValue IVLVal;
  llvm::Function *Fn = nullptr;
  bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
  enum class ActionToDo {
    DoNotPush,
    PushAsLastprivateConditional,
    DisableLastprivateConditional,
  };
  CodeGenModule &CGM;
  ActionToDo Action = ActionToDo::DoNotPush;

  /// Check and try to disable analysis of inner regions for changes in
  /// lastprivate conditional.
  void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
                                 llvm::DenseSet<CanonicalDeclPtr<const Decl>>
                                     &NeedToAddForLPCsAsDisabled) const;

  LastprivateConditionalRAII(CodeGenFunction &CGF,
                             const OMPExecutableDirective &S);

public:
  explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
                                      const OMPExecutableDirective &S,
                                      LValue IVLVal);
  static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &S);
  ~LastprivateConditionalRAII();
};

llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }

protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;

/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;

/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                         StringRef Separator);

/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
                                uint64_t Size, int32_t Flags,
                                llvm::GlobalValue::LinkageTypes Linkage);

/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
                                              StringRef ParentName,
                                              llvm::Function *&OutlinedFn,
                                              llvm::Constant *&OutlinedFnID,
                                              bool IsOffloadEntry,
                                              const RegionCodeGenTy &CodeGen);

/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
                                unsigned Flags = 0);

/// Emit the number of teams for a target directive. Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
                                              const OMPExecutableDirective &D,
                                              int32_t &DefaultVal);
llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &D);
/// Emit the number of threads for a target directive.
/// Inspect the thread_limit clause associated with a teams construct combined
/// or closely nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *
getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
                                    const OMPExecutableDirective &D,
                                    int32_t &DefaultVal);
llvm::Value *
emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
                                 const OMPExecutableDirective &D);

/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();

/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);

/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }

/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
              llvm::FunctionCallee Callee,
              ArrayRef<llvm::Value *> Args = llvm::None) const;

/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);

void setLocThreadIdInsertPt(CodeGenFunction &CGF,
                            bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);

/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }

/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }

/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);

/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const { return KmpCriticalNameTy; }

/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);

private:
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
  llvm::Value *DebugLoc;
  llvm::Value *ThreadID;
  /// Insert point for the service instructions.
  llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
    OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
                       std::pair<llvm::Function *, llvm::Function *>>
    UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
                       SmallVector<const OMPDeclareReductionDecl *, 4>>
    FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
    llvm::DenseMap<llvm::Function *,
                   SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
               llvm::DenseMap<CanonicalDeclPtr<const Decl>,
                              std::tuple<QualType, const FieldDecl *,
                                         const FieldDecl *, LValue>>>
    LastprivateConditionalToTypes;
/// Maps function to the position of the untied task locals stack.
llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::GlobalVariable>,
                llvm::BumpPtrAllocator>
    InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
///    void *              shareds; /**< pointer to block of pointers to
///                                      shared vars          */
///    kmp_routine_entry_t routine; /**< pointer to routine to call for
///                                      executing task       */
///    kmp_int32           part_id; /**< part id for the task */
///    kmp_routine_entry_t destructors; /* pointer to function to invoke
///                                         destructors of firstprivate C++
///                                         objects            */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
///    kmp_intptr_t base_addr;
///    size_t       len;
///    struct {
///      bool in:1;
///      bool out:1;
///    } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
///    kmp_intptr_t base_addr;
///    size_t       len;
///    struct {
///      bool flag1 : 1;
///      bool flag2 : 1;
///      kmp_int32 reserved : 30;
///    } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim {  // loop bounds info casted to kmp_int64
///   kmp_int64 lo; // lower
///   kmp_int64 up; // upper
///   kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
  CodeGenModule &CGM;

  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;

public:
  /// Base class of the entries info.
  class OffloadEntryInfo {
  public:
    /// Kind of a given entry.
    enum OffloadingEntryInfoKinds : unsigned {
      /// Entry is a target region.
      OffloadingEntryInfoTargetRegion = 0,
      /// Entry is a declare target variable.
      OffloadingEntryInfoDeviceGlobalVar = 1,
      /// Invalid entry info.
      OffloadingEntryInfoInvalid = ~0u
    };

  protected:
    OffloadEntryInfo() = delete;
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                              uint32_t Flags)
        : Flags(Flags), Order(Order), Kind(Kind) {}
    ~OffloadEntryInfo() = default;

  public:
    bool isValid() const { return Order != ~0u; }
    unsigned getOrder() const { return Order; }
    OffloadingEntryInfoKinds getKind() const { return Kind; }
    uint32_t getFlags() const { return Flags; }
    void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
    llvm::Constant *getAddress() const {
      return cast_or_null<llvm::Constant>(Addr);
    }
    void setAddress(llvm::Constant *V) {
      assert(!Addr.pointsToAliveValue() && "Address has been set before!");
      Addr = V;
    }
    static bool classof(const OffloadEntryInfo *Info) { return true; }

  private:
    /// Address of the entity that has to be mapped for offloading.
    llvm::WeakTrackingVH Addr;

    /// Flags associated with the device global.
    uint32_t Flags = 0u;

    /// Order this entry was emitted.
    unsigned Order = ~0u;

    OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
  };

  /// Return true if there are no entries defined.
  bool empty() const;
  /// Return number of entries defined so far.
  unsigned size() const { return OffloadingEntriesNum; }

  OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}

  //
  // Target region entries related.
  //

  /// Kind of the target registry entry.
  enum OMPTargetRegionEntryKind : uint32_t {
    /// Mark the entry as target region.
    OMPTargetRegionEntryTargetRegion = 0x0,
    /// Mark the entry as a global constructor.
    OMPTargetRegionEntryCtor = 0x02,
    /// Mark the entry as a global destructor.
    OMPTargetRegionEntryDtor = 0x04,
  };

  /// Target region entries info.
  class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
    /// Address that can be used as the ID of the entry.
    llvm::Constant *ID = nullptr;

  public:
    OffloadEntryInfoTargetRegion()
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
    explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr,
                                          llvm::Constant *ID,
                                          OMPTargetRegionEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
          ID(ID) {
      setAddress(Addr);
    }

    llvm::Constant *getID() const { return ID; }
    void setID(llvm::Constant *V) {
      assert(!ID && "ID has been set before!");
      ID = V;
    }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoTargetRegion;
    }
  };

  /// Initialize target region entry.
  void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                       StringRef ParentName, unsigned LineNum,
                                       unsigned Order);
  /// Register target region entry.
  void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     llvm::Constant *Addr, llvm::Constant *ID,
                                     OMPTargetRegionEntryKind Flags);
  /// Return true if a target region entry with the provided information
  /// exists.
  bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                StringRef ParentName, unsigned LineNum,
                                bool IgnoreAddressId = false) const;
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                  const OffloadEntryInfoTargetRegion &)>
      OffloadTargetRegionEntryInfoActTy;
  void actOnTargetRegionEntriesInfo(
      const OffloadTargetRegionEntryInfoActTy &Action);

  //
  // Device global variable entries related.
  //

  /// Kind of the global variable entry.
  enum OMPTargetGlobalVarEntryKind : uint32_t {
    /// Mark the entry as a to declare target.
    OMPTargetGlobalVarEntryTo = 0x0,
    /// Mark the entry as a to declare target link.
    OMPTargetGlobalVarEntryLink = 0x1,
  };

  /// Device global variable entries info.
  class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
    /// Size of the global variable.
    CharUnits VarSize;
    llvm::GlobalValue::LinkageTypes Linkage;

  public:
    OffloadEntryInfoDeviceGlobalVar()
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                             OMPTargetGlobalVarEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
    explicit OffloadEntryInfoDeviceGlobalVar(
        unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
        OMPTargetGlobalVarEntryKind Flags,
        llvm::GlobalValue::LinkageTypes Linkage)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
          VarSize(VarSize), Linkage(Linkage) {
      setAddress(Addr);
    }

    CharUnits getVarSize() const { return VarSize; }
    void setVarSize(CharUnits Size) { VarSize = Size; }
    llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
    void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
    }
  };

  /// Initialize device global variable entry.
  void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                          OMPTargetGlobalVarEntryKind Flags,
                                          unsigned Order);

  /// Register device global variable entry.
  void registerDeviceGlobalVarEntryInfo(
      StringRef VarName, llvm::Constant *Addr, CharUnits VarSize,
      OMPTargetGlobalVarEntryKind Flags,
      llvm::GlobalValue::LinkageTypes Linkage);
  /// Checks if the variable with the given name has been registered already.
  bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
    return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
  }
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(StringRef,
                                  const OffloadEntryInfoDeviceGlobalVar &)>
      OffloadDeviceGlobalVarEntryInfoActTy;
  void actOnDeviceGlobalVarEntriesInfo(
      const OffloadDeviceGlobalVarEntryInfoActTy &Action);

private:
  // Storage for target region entries kind. The storage is to be indexed by
  // file ID, device ID, parent function name and line number.
  typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
      OffloadEntriesTargetRegionPerLine;
  typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
      OffloadEntriesTargetRegionPerParentName;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
      OffloadEntriesTargetRegionPerFile;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
      OffloadEntriesTargetRegionPerDevice;
  typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
  OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries kind. The storage is to be
  /// indexed by mangled name.
  typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
      OffloadEntriesDeviceGlobalVarTy;
  OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;

bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;

/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;

using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;

using UntiedLocalVarsAddressesMap =
    llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
                    std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;

/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;

/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;

/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;

/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;

/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the device. (NOTE(review): the original
/// comment was truncated here.)
bool HasEmittedDeclareTargetRegion = false;

/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();

/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);

/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();

/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned. Will create a distribute call
/// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
                                                 bool IVSigned,
                                                 bool IsGPUDistribute);

/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
                                                bool IVSigned);

/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);

/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::GlobalVariable *getOrCreateInternalVariable(llvm::Type *Ty,
                                                  const llvm::Twine &Name,
                                                  unsigned AddressSpace = 0);

/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;

/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;

/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
                              llvm::Value *Ctor, llvm::Value *CopyCtor,
                              llvm::Value *Dtor, SourceLocation Loc);

/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
                                llvm::Value *Handle, llvm::Value *BasePtr,
                                llvm::Value *Ptr, llvm::Value *Size,
                                llvm::Value *MapType, llvm::Value *MapName,
                                CharUnits ElementSize,
                                llvm::BasicBlock *ExitBB, bool IsInit);

struct TaskResultTy {
  llvm::Value *NewTask = nullptr;
  llvm::Function *TaskEntry = nullptr;
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  LValue TDBase;
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const OMPTaskDataTy &Data);

/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); /// Returns the number of the elements and the address of the depobj /// dependency array. /// \return Number of elements in depobj array and the pointer to the array of /// dependencies. std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); SmallVector<llvm::Value *, 4> emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy, const OMPTaskDataTy::DependData &Data); void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy, LValue PosLVal, const OMPTaskDataTy::DependData &Data, Address DependenciesArray); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. 
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
                                      const OMPDeclareReductionDecl *D);

/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);

/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
                           CodeGenFunction *CGF = nullptr);

/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);

/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);

/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);

/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts);

/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);

/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param NumThreads The value corresponding to the num_threads clause, if
/// any, or nullptr.
/// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond, llvm::Value *NumThreads); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits a masked region. /// \param MaskedOpGen Generator for the statement associated with the given /// masked region. virtual void emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter = nullptr); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. 
virtual void emitOrderedRegion(CodeGenFunction &CGF,
                               const RegionCodeGenTy &OrderedOpGen,
                               SourceLocation Loc, bool IsThreads);

/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                             OpenMPDirectiveKind Kind,
                             bool EmitChecks = true,
                             bool ForceSimpleCall = false);

/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
                                bool Chunked) const;

/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
                                bool Chunked) const;

/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
                             bool Chunked) const;

/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
/// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. /// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. 
/// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. llvm::Value *Chunk = nullptr; StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. 
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values); /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values); /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. 
/// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. 
/// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO); /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. 
/// Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const Expr *IfCond,
                          const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit code for the directive that does not require outlining. /// /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param HasCancel true if region has inner cancel directive, false /// otherwise. virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false); /// Emits reduction function. /// \param ArgsElemType Array type containing pointers to reduction variables. /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. 
llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsElemType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps); /// Emits single reduction combiner void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS); struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; }; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. 
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options); /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           ArrayRef<const Expr *> LHSExprs,
                                           ArrayRef<const Expr *> RHSExprs,
                                           const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                   bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                     ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                     llvm::Value *ReductionsPtr,
                                     LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPTaskDataTy &Data);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
/// virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion); /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. /// \param SizeEmitter Callback to emit number of iterations for loop-based /// directives. 
virtual void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. virtual bool emitTargetFunctions(GlobalDecl GD); /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. virtual bool emitTargetGlobalVariable(GlobalDecl GD); /// Checks if the provided global decl \a GD is a declare target variable and /// registers it when emitting code for the host. virtual void registerTargetGlobalVariable(const VarDecl *VD, llvm::Constant *Addr); /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. virtual bool emitTargetGlobal(GlobalDecl GD); /// Creates and returns a registration function for when at least one /// requires directives was used in the current module. llvm::Function *emitRequiresDirectiveRegFun(); /// Creates all the offload entries in the current compilation unit /// along with the associated metadata. void createOffloadEntriesAndInfoMetadata(); /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. 
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
                           const OMPExecutableDirective &D,
                           SourceLocation Loc, llvm::Function *OutlinedFn,
                           ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
                                const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
  /// Set to true if device pointer information have to be obtained.
  bool RequiresDevicePointerInfo = false;
  /// Set to true if Clang emits separate runtime calls for the beginning and
  /// end of the region. These calls might have separate map type arrays.
  bool SeparateBeginEndCalls = false;

public:
  /// The array of base pointer passed to the runtime library.
  llvm::Value *BasePointersArray = nullptr;
  /// The array of section pointers passed to the runtime library.
  llvm::Value *PointersArray = nullptr;
  /// The array of sizes passed to the runtime library.
  llvm::Value *SizesArray = nullptr;
  /// The array of map types passed to the runtime library for the beginning
  /// of the region or for the entire region if there are no separate map
  /// types for the region end.
  llvm::Value *MapTypesArray = nullptr;
  /// The array of map types passed to the runtime library for the end of the
  /// region, or nullptr if there are no separate map types for the region
  /// end.
  llvm::Value *MapTypesArrayEnd = nullptr;
  /// The array of user-defined mappers passed to the runtime library.
  llvm::Value *MappersArray = nullptr;
  /// The array of original declaration names of mapped pointers sent to the
  /// runtime library for debugging.
  llvm::Value *MapNamesArray = nullptr;
  /// Indicate whether any user-defined mapper exists.
  bool HasMapper = false;
  /// The total number of pointers passed to the runtime library.
  unsigned NumberOfPtrs = 0u;
  /// Map between a declaration of a capture and the corresponding base
  /// pointer address where the runtime returns the device pointers.
  llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

  explicit TargetDataInfo() {}
  explicit TargetDataInfo(bool RequiresDevicePointerInfo,
                          bool SeparateBeginEndCalls)
      : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
        SeparateBeginEndCalls(SeparateBeginEndCalls) {}
  /// Clear information about the data arrays.
  void clearArrayInfo() {
    BasePointersArray = nullptr;
    PointersArray = nullptr;
    SizesArray = nullptr;
    MapTypesArray = nullptr;
    MapTypesArrayEnd = nullptr;
    MapNamesArray = nullptr;
    MappersArray = nullptr;
    HasMapper = false;
    NumberOfPtrs = 0u;
  }
  /// Return true if the current target data information has valid arrays.
  bool isValid() {
    return BasePointersArray && PointersArray && SizesArray &&
           MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
  }
  bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
  bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
};

/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info); /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device); /// Marks function \a Fn with properly mangled versions of vector functions. /// \param FD Function marked as 'declare simd'. /// \param Fn LLVM function that must be marked with 'declare simd' /// attributes. virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn); /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations); /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C); /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; } /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. 
/// \param TargetParam Corresponding target-specific parameter. virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const; /// Choose default schedule type and chunk value for the /// dist_schedule clause. virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {} /// Choose default schedule type and chunk value for the /// schedule clause. virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const; /// Emits call of the outlined function with the provided arguments, /// translating these arguments to correct target-specific arguments. virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits OpenMP-specific function prolog. /// Required for device constructs. virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D); /// Gets the OpenMP-specific address of the local variable. virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Marks the declaration as already emitted for the device code and returns /// true, if it was marked already, and false, otherwise. bool markAsGlobalTarget(GlobalDecl GD); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; /// Adjust some parameters for the target-based directives, like addresses of /// the variables captured by reference in lambdas. 
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const; /// Perform check on requires decl to ensure that target architecture /// supports unified addressing virtual void processRequiresDirective(const OMPRequiresDecl *D); /// Gets default memory ordering as specified in requires directive. llvm::AtomicOrdering getDefaultMemoryOrdering() const; /// Checks if the variable has associated OMPAllocateDeclAttr attribute with /// the predefined allocator and translates it into the corresponding address /// space. virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS); /// Return whether the unified_shared_memory has been specified. bool hasRequiresUnifiedSharedMemory() const; /// Checks if the \p VD variable is marked as nontemporal declaration in /// current context. bool isNontemporalDecl(const ValueDecl *VD) const; /// Create specialized alloca to handle lastprivate conditionals. Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD); /// Checks if the provided \p LVal is lastprivate conditional and emits the /// code to update the value of the original variable. /// \code /// lastprivate(conditional: a) /// ... /// <type> a; /// lp_a = ...; /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// \endcode virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS); /// Checks if the lastprivate conditional was updated in inner region and /// writes the value. /// \code /// lastprivate(conditional: a) /// ... /// <type> a;bool Fired = false; /// #pragma omp ... 
/// shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                   LValue PrivLVal,
                                                   const VarDecl *VD,
                                                   SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
                 ArrayRef<OMPTaskDataTy::DependData> Dependencies,
                 SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
                               const OMPTaskDataTy::DependData &Dependencies,
                               SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                       SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                      OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
                            const Expr *AllocatorTraits);

/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);

/// Returns true if the variable is a local variable in untied task.
bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};

/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override; /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param NumThreads The value corresponding to the num_threads clause, if /// any, or nullptr. 
/// void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond, llvm::Value *NumThreads) override; /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override; /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override; /// Emits a masked region. /// \param MaskedOpGen Generator for the statement associated with the given /// masked region. void emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter = nullptr) override; /// Emits a masked region. /// \param MaskedOpGen Generator for the statement associated with the given /// masked region. /// Emits code for a taskyield directive. void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override; /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override; /// Emit an ordered region. 
/// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override; /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override; /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override; /// Call the appropriate runtime routine to initialize it before start /// of loop. 
/// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override; /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override; /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override; /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. 
/// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. 
It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. 
  /// Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
  /// kmp_task_t *new_task), where new_task is a resulting structure from
  /// previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
  /// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references the shared variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    llvm::Function *TaskFunction, QualType SharedsTy,
                    Address Shareds, const Expr *IfCond,
                    const OMPTaskDataTy &Data) override;

  /// Emit task region for the taskloop directive. The taskloop region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
  /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
  /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
  /// is a resulting structure from
  /// previous items.
  /// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. 
/// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
  llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
                                     ArrayRef<const Expr *> LHSExprs,
                                     ArrayRef<const Expr *> RHSExprs,
                                     const OMPTaskDataTy &Data) override;

  /// Emits the following code for reduction clause with task modifier:
  /// \code
  /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
  /// \endcode
  void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                             bool IsWorksharingReduction) override;

  /// Required to resolve existing problems in the runtime. Emits threadprivate
  /// variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions + emits threadprivate variable to
  /// store the pointer to the original reduction item for the custom
  /// initializer defined by declare reduction construct.
  /// \param RCG Allows to reuse an existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                               ReductionCodeGen &RCG, unsigned N) override;

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                               llvm::Value *ReductionsPtr,
                               LValue SharedLVal) override;

  /// Emit code for 'taskwait' directive.
  void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
                        const OMPTaskDataTy &Data) override;

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must be
  /// emitted.
  ///
  void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
                                 OpenMPDirectiveKind CancelRegion) override;

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. 
void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. 
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. 
/// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override { return Address::invalid(); } }; } // namespace CodeGen } // namespace clang #endif
DRB012-minusminus-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* The -- operation is not protected, causing race condition. Data race pair: numNodes2@75 vs. numNodes2@75 */ #include <stdlib.h> #include <omp.h> int main(int argc,char *argv[]) { int i; int len = 100; if (argc > 1) len = atoi(argv[1]); int numNodes = len; int numNodes2 = 0; int x[len]; #pragma omp parallel for private (i) firstprivate (len) for (i = 0; i <= len - 1; i += 1) { if (i % 2 == 0) x[i] = 5; else x[i] = - 5; } #pragma omp parallel for private (i) reduction (+:numNodes2) for (i = numNodes - 1; i >= 0; i += -1) { if (x[i] <= 0) { numNodes2 += - 1; } } printf("%d\n",numNodes2); return 0; }
opencl_krb5pa-sha1_fmt_plug.c
/*
 * Kerberos 5 "PA ENC TIMESTAMP" by magnum & Dhiru
 *
 * Pcap file -> input file:
 * 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml
 * 2. krbng2john.py ~/capture.pdml > krb5.in
 * 3. Run john on krb5.in
 *
 * http://www.ietf.org/rfc/rfc4757.txt
 * http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html
 *
 * Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum'
 *
 * NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ
 * packet.
 *
 * Default Salt: realm + user
 *
 * AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5
 * See the following RFC for more details about the crypto & algorithms used:
 *
 * RFC3961 - Encryption and Checksum Specifications for Kerberos 5
 * RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5
 *
 * march 09 / kevin devine <wyse101 0x40 gmail.com>
 *
 * This software is Copyright (c) 2012 magnum, and it is hereby released to the
 * general public under the following terms: Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 *
 * This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and
 * released under same terms as above
 */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_krb5pa_sha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_krb5pa_sha1);
#else

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>

#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "common.h"
#include "unicode.h"
#include "config.h"
#include "aes.h"
#include "krb5_common.h"
#include "common-opencl.h"
/* OUTLEN must be defined before the PBKDF2 kernel header is pulled in */
#define OUTLEN 32
#include "opencl_pbkdf2_hmac_sha1.h"
#include "hmac_sha.h"
#include "loader.h"

#define FORMAT_LABEL        "krb5pa-sha1-opencl"
#define FORMAT_NAME         "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */
#define FORMAT_TAG          "$krb5pa$"
#define FORMAT_TAG_LEN      (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME      "PBKDF2-SHA1 OpenCL"
#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    -1001
#define BINARY_SIZE         12
#define BINARY_ALIGN        4
#define SALT_SIZE           sizeof(struct custom_salt)
#define SALT_ALIGN          4
#define MAX_SALTLEN         52
#define MAX_REALMLEN        MAX_SALTLEN
#define MAX_USERLEN         MAX_SALTLEN
#define TIMESTAMP_SIZE      44
#define CHECKSUM_SIZE       BINARY_SIZE
#define TOTAL_LENGTH        (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN)

#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  1

/* Interleaved key-buffer addressing for vectorized kernels. */
/* This handles all sizes */
#define GETPOS(i, index)    (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)
/* This is faster but can't handle size 3 */
//#define GETPOS(i, index)  (((index) & (ocl_v_width - 1)) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)

static struct fmt_tests tests[] = {
	{"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"},
	{"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"},
	{"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"},
	{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"},
	{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
	{"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
	/* etype 17 hash obtained using MiTM etype downgrade attack */
	{"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"},
	{NULL},
};

/* Host-side handles for the GPU buffers and the three PBKDF2 kernels */
static cl_mem mem_in, mem_out, mem_salt, mem_state, pinned_in, pinned_out;
static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;
static struct fmt_main *self;

static struct custom_salt {
	int type;
	int etype;
	unsigned char realm[64];
	unsigned char user[64];
	unsigned char salt[64]; /* realm + user */
	unsigned char ct[TIMESTAMP_SIZE];
} *cur_salt;

/* RFC3961 n-fold derived constants, filled in once by init() */
static unsigned char constant[16];
static unsigned char ke_input[16];
static unsigned char ki_input[16];

static size_t key_buf_size;
static unsigned int *inbuffer;
static pbkdf2_salt currentsalt;
static pbkdf2_out *output;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static int new_keys;

#define ITERATIONS (4096 - 1)
#define HASH_LOOPS 105 // Must be made from factors 3, 3, 5, 7, 13
#define STEP       0
#define SEED       128

static const char * warn[] = {
	"P xfer: ", ", init: ", ", loop: ", ", inter: ", ", final: ", ", res xfer: "
};

static int split_events[] = { 2, -1, -1 };

//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
/* Largest LWS the device supports for all three kernels combined. */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

#if 0
struct fmt_main *me;
#endif

/*
 * Allocate host/GPU buffers for a given GWS and bind the kernel arguments.
 * Called (and re-called) by the autotuner via opencl_init_auto_setup().
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;
	key_buf_size = 64 * gws;

	// Allocate memory
	pinned_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating pinned in");
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	inbuffer = clEnqueueMapBuffer(queue[gpu_id], pinned_in, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, key_buf_size, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory");

	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(pbkdf2_salt), &currentsalt, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");

	pinned_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating pinned out");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	output = clEnqueueMapBuffer(queue[gpu_id], pinned_out, CL_TRUE, CL_MAP_READ, 0, sizeof(pbkdf2_out) * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	crypt_out = mem_alloc(sizeof(*crypt_out) * gws);
}

/* Release everything create_clobj() allocated; safe to call when nothing is. */
static void release_clobj(void)
{
	if (crypt_out) {
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_in, inbuffer, 0, NULL, NULL), "Error Unmapping mem in");
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_out, output, 0, NULL, NULL), "Error Unmapping mem in");
		HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");

		HANDLE_CLERROR(clReleaseMemObject(pinned_in), "Release pinned_in");
		HANDLE_CLERROR(clReleaseMemObject(pinned_out), "Release pinned_out");
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release pinned_in");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem_out");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem_salt");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");

		MEM_FREE(crypt_out);
	}
}

/* Format teardown: undo reset()'s kernel/program creation. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release Kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release Kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release Kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/*
 * One-time format init: pick the vector width and precompute the three
 * RFC3961 n-fold constants used by key derivation (dk) in crypt_all().
 */
static void init(struct fmt_main *_self)
{
	unsigned char usage[5];
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;
	opencl_prepare_dev(gpu_id);
	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo), ALGORITHM_NAME " %ux", ocl_v_width);
		self->params.algorithm_name = valgo;
	}

	// generate 128 bits from the 64-bit (8-byte) "kerberos" string
	nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);

	memset(usage,0,sizeof(usage));
	usage[3] = 0x01; // key number in big-endian format
	usage[4] = 0xAA; // used to derive Ke
	nfold(sizeof(usage) * 8, usage, sizeof(ke_input) * 8, ke_input);

	memset(usage,0,sizeof(usage));
	usage[3] = 0x01; // key number in big-endian format
	usage[4] = 0x55; // used to derive Ki
	nfold(sizeof(usage) * 8, usage, sizeof(ki_input) * 8, ki_input);
}

/* Build the OpenCL program, create the kernels and run the autotuner once. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[128];

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DITERATIONS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, ITERATIONS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		//Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2 * HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, ocl_v_width * sizeof(pbkdf2_state), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 4 * ITERATIONS + 4, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 5000000000ULL));
	}
}

/*
 * Validate a candidate ciphertext line.  Checks the tag, the etype (17/18),
 * per-field length limits, the effective salt length and the hex payload.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *data = ciphertext;
	int type, saltlen = 0;

	// tag is mandatory
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	data += FORMAT_TAG_LEN;

	// etype field, 17 or 18
	p = strchr(data, '$');
	if (!p || p - data != 2)
		return 0;
	type = atoi(data);
	if (type < 17 || type > 18)
		return 0;
	data = p + 1;

	// user field
	p = strchr(data, '$');
	if (!p || p - data > MAX_USERLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// realm field
	p = strchr(data, '$');
	if (!p || p - data > MAX_REALMLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// salt field
	p = strchr(data, '$');
	if (!p)
		return 0;
	// if salt is empty, realm.user is used instead
	if (p - data)
		saltlen = p - data;
	data = p + 1;

	// We support a max. total salt length of 52.
	// We could opt to emit a warning if rejected here.
	if (saltlen > MAX_SALTLEN) {
		static int warned = 0;

		if (!ldr_in_pot)
		if (!warned++)
			fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);

		return 0;
	}

	// 56 bytes (112 hex chars) encrypted timestamp + checksum
	if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) ||
	    strspn(data, HEXCHARS_all) != strlen(data))
		return 0;

	return 1;
}

/*
 * Parse a (valid) ciphertext into a custom_salt.  Empty fields collapse
 * adjacent '$' separators, hence the p[-1] == '$' checks after strtokm.
 * An empty salt field defaults to realm + user.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "$");
	cs.etype = atoi(p);
	p = strtokm(NULL, "$");
	if (p[-1] == '$')
		cs.user[0] = 0;
	else {
		strcpy((char*)cs.user, p);
		p = strtokm(NULL, "$");
	}
	if (p[-1] == '$')
		cs.realm[0] = 0;
	else {
		strcpy((char*)cs.realm, p);
		p = strtokm(NULL, "$");
	}
	if (p[-1] == '$') {
		strcpy((char*)cs.salt, (char*)cs.realm);
		strcat((char*)cs.salt, (char*)cs.user);
	} else {
		strcpy((char*)cs.salt, p);
		p = strtokm(NULL, "$");
	}
	/* decode the hex timestamp+checksum payload */
	for (i = 0; i < TIMESTAMP_SIZE; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

/* Scatter the key into the interleaved GPU transfer buffer (see GETPOS). */
static void set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;
}

/* Gather a key back out of the interleaved buffer. */
static char* get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH &&
	       (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

/*
 * Canonicalize a ciphertext: fill in an empty salt field with the default
 * salt and lower-case the hex payload, so equivalent hashes unify.
 */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[TOTAL_LENGTH + 1];
	char in[TOTAL_LENGTH + 1];
	char salt[MAX_SALTLEN + 1];
	char *data;
	char *e, *u, *r, *s, *tc;

	strnzcpy(in, ciphertext, sizeof(in));

	/* peel the fields off back-to-front: timestamp, salt, realm, user */
	tc = strrchr(in, '$'); *tc++ = 0;
	s = strrchr(in, '$'); *s++ = 0;
	r = strrchr(in, '$'); *r++ = 0;
	u = strrchr(in, '$'); *u++ = 0;
	e = in + 8;

	/* Default salt is realm + user */
	if (!*s) {
		snprintf(salt, sizeof(salt), "%s%s", r, u);
		s = salt;
	}
	snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc);

	data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
	strlwr(data);

	return out;
}

/* Decode the trailing 12-byte checksum (the comparison target). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Push the current salt to the GPU-side pbkdf2_salt buffer. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	currentsalt.length = strlen((char*)cur_salt->salt);
	currentsalt.iterations = ITERATIONS;

	memcpy(currentsalt.salt, cur_salt->salt, currentsalt.length);

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu");
}

/*
 * PBKDF2 runs on the GPU (init + split loops + final); the cheap per-key
 * postprocessing (key derivation, AES timestamp decrypt, HMAC-SHA1 checksum)
 * runs on the CPU, OpenMP-parallel.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;
	int key_size;
	size_t scalar_gws;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = global_work_size * ocl_v_width;

	/* etype 17 is AES-128, etype 18 is AES-256 */
	if (cur_salt->etype == 17)
		key_size = 16;
	else
		key_size = 32;

	// Copy data to gpu
	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
		new_keys = 0;
	}

	// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");

	for (i = 0; i < (ocl_autotune_running ? 1 : ITERATIONS / HASH_LOOPS); i++) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
		BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");

	for (i = 0; i < (ocl_autotune_running ? 1 : ITERATIONS / HASH_LOOPS); i++) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run loop kernel (2nd pass)");
		BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[4]), "Run final kernel (SHA1)");
	BENCH_CLERROR(clFinish(queue[gpu_id]), "Failed running final kernel");

	// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[5]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++) {
			unsigned char base_key[32];
			unsigned char Ke[32];
			unsigned char plaintext[TIMESTAMP_SIZE];

			// pbkdf2((const unsigned char*)saved_key[i], len, (unsigned char *)cur_salt->salt,strlen((char*)cur_salt->salt), 4096, (unsigned int*)tkey);
			dk(base_key, (unsigned char*)output[i].dk, key_size, constant, 32);
			dk(Ke, base_key, key_size, ke_input, 32);

			// Decrypt the AS-REQ timestamp encrypted with 256-bit AES.
			krb_decrypt(cur_salt->ct, TIMESTAMP_SIZE, plaintext, Ke, key_size);

			// Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and
			// bail out if we are out of luck.
			if (plaintext[22] == '2' && plaintext[23] == '0' &&
			    plaintext[36] == 'Z') {
				unsigned char Ki[32];
				unsigned char checksum[20];

				dk(Ki, base_key, key_size, ki_input, 32);
				// derive checksum of plaintext (only 96 bits used out of 160)
				hmac_sha1(Ki, key_size, plaintext, TIMESTAMP_SIZE, checksum, 20);
				memcpy(crypt_out[i], checksum, BINARY_SIZE);
			} else {
				memset(crypt_out[i], 0, BINARY_SIZE);
			}
		}
	}

	return count;
}

/* Fast partial compare: only the first machine word of the checksum. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_krb5pa_sha1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
GB_binop__rminus_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__rminus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__rminus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_uint32)
// A*D function (colscale):         GB (_AxD__rminus_uint32)
// D*A function (rowscale):         GB (_DxB__rminus_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_uint32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_uint32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_uint32)
// C=scalar+B                       GB (_bind1st__rminus_uint32)
// C=scalar+B'                      GB (_bind1st_tran__rminus_uint32)
// C=A+scalar                       GB (_bind2nd__rminus_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_uint32)

// C type:   uint32_t
// A type:   uint32_t
// A pattern? 0
// B type:   uint32_t
// B pattern? 0

// BinaryOp: cij = (bij - aij)

// These macros configure the generic template files included below; the
// templates expand into the actual loops for each method.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (rminus: note the reversed operands)
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_UINT32 || GxB_NO_RMINUS_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rminus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij - x) ;                       \
}

GrB_Info GB (_bind1st_tran__rminus_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (y - aij) ;                       \
}

GrB_Info GB (_bind2nd_tran__rminus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
single_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// NOTE(review): this is a clang -verify test; the expected-error@+N /
// expected-warning@+N comments are checked against exact relative line
// offsets, so comments must never be inserted between a directive and
// the line it points at.

void foo();

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single foo

// Well-formed uses of '#pragma omp single' with no clauses.
void test_no_clause() {
  int i;
#pragma omp single
  foo();

#pragma omp single
  ++i;
}

// Branching into/out of a single region is diagnosed; labels inside the
// region are a separate scope from the enclosing function's labels.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];

#pragma omp parallel
#pragma omp single
  {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Trailing junk after the directive is ignored with a warning.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single foo bar
  foo();
}

// Non-identifier tokens and clauses that do not belong to 'single'.
void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single;
  foo();
#pragma omp parallel
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single linear(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single private(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single, private(x);
  foo();
}

// Malformed and well-formed 'private' clauses.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp single private(
  foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(,
  foo();
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(, )
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private()
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private(int)
  foo();
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single private(0)
  foo();

  int x, y, z;
#pragma omp parallel
#pragma omp single private(x)
  foo();
#pragma omp parallel
#pragma omp single private(x, y)
  foo();
#pragma omp parallel
#pragma omp single private(x, y, z)
  foo();
}

// Malformed 'firstprivate' clauses (same grammar errors as 'private').
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(
  foo();

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(,
  foo();
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(, )
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate()
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(int)
  foo();
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single firstprivate(0)
  foo();
}

// Duplicate 'nowait' clause is rejected.
void test_nowait() {
#pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}}
  for (int i = 0; i < 16; ++i)
    ;
}
pt.h
//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  trueke                                                                      //
//  A multi-GPU implementation of the exchange Monte Carlo method.              //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  Copyright © 2015 Cristobal A. Navarro, Wei Huang.                           //
//                                                                              //
//  This file is part of trueke.                                                //
//  trueke is free software: you can redistribute it and/or modify              //
//  it under the terms of the GNU General Public License as published by        //
//  the Free Software Foundation, either version 3 of the License, or           //
//  (at your option) any later version.                                         //
//                                                                              //
//  trueke is distributed in the hope that it will be useful,                   //
//  but WITHOUT ANY WARRANTY; without even the implied warranty of              //
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               //
//  GNU General Public License for more details.                                //
//                                                                              //
//  You should have received a copy of the GNU General Public License           //
//  along with trueke.  If not, see <http://www.gnu.org/licenses/>.             //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////
#ifndef _PT_H_
#define _PT_H_

/* forward function declarations */
void ptenergies(setup_t *s, int tid, int a, int b);
int exchange(setup_t *s, int tid, int a, int b, int p);
void measure(setup_t *s, int tid, int a, int b, int p);
void pt(setup_t *s, int tid, int a, int b);
void swap(setup_t *s, int a, int b );

/* pt(): parallel tempering main loop.
 * Each OpenMP thread `tid` owns the replica index range [a, b).
 * Runs s->pts parallel-tempering steps; every step does a Metropolis
 * sweep, recomputes per-replica energies, (optionally) measures, and
 * attempts replica exchanges.  Thread 0 owns all console progress output. */
void pt(setup_t *s, int tid, int a, int b){
	/* reset exchange counters for this thread's replica range */
	reset_array<float>((float*)(s->ex + tid*(b-a)), b-a, 0.0f);
	/* reset average exchange counters */
	reset_array<float>((float*)(s->avex + tid*(b-a)), b-a, 0.0f);
	/* running average of accepted exchanges across all pt steps */
	double avex = 0.0;
	/* progress printing + timing (thread 0 only) */
	if( tid == 0 ){
		printf("ptsteps.............0%%"); fflush(stdout);
		sdkResetTimer(&(s->timer));
		sdkStartTimer(&(s->timer));
	}
	/* parallel tempering */
	for(int p = 0; p < s->pts; ++p){
		/* pt metropolis phase (s->ms lattice sweeps) */
		metropolis(s, tid, a, b, s->ms);
		/* compute energies for exchange */
		ptenergies(s, tid, a, b);
#ifdef MEASURE
		/* measure every s->period pt steps */
		if((p % s->period) == 0){
			measure(s, tid, a, b, p);
		}
#endif
		/* exchange phase; accumulate acceptance ratio contribution */
		avex += (double)exchange(s, tid, a, b, p)/(double)s->pts;
		/* progress printing */
		if(tid == 0){
			printf("\rptsteps............%i%%", (100*(p+1))/s->pts); fflush(stdout);
		}
	}
	/* compute the average exchange for each replica pair (r_i, r_i-1);
	 * multiplied by 2 because odd and even pairs alternate per step */
	for(int i = a; i < b; ++i){
		s->avex[i] = 2.0 * s->ex[i] / (double)s->pts;
	}
	/* final timing / summary printing (thread 0 only) */
	if( tid == 0 ){
		sdkStopTimer(&(s->timer));
		printf(" %.3fs ", sdkGetTimerValue(&(s->timer))/1000.0f);
		printf("\t[<ex> = %.3f]\n\n", avex / ((double)(s->R-1)/2.0));
	}
}

/* exchange phase: thread 0 proposes temperature swaps between adjacent
 * replicas, alternating odd/even pairs according to step parity `p`.
 * The standard PT acceptance rule is used:
 *   accept if delta < 0 or rand01 < exp(-delta),
 *   delta = (1/T_k - 1/T_{k-1}) * (E_{k-1} - E_k).
 * Both omp barriers fence the single-threaded section so every thread's
 * energies are ready before, and the new temperature map is visible after.
 * Returns the number of accepted exchanges this step. */
int exchange(setup_t *s, int tid, int a, int b, int p){
	/* count the number of exchanges */
	int ex = 0;
	/* sync openmp threads before entering the exchange phase */
	#pragma omp barrier
	if(tid == 0){
		double delta = 0.0;
		/* traverse in reverse temperature order */
		for(int k = s->R-1; k > 0; --k){
			/* alternate between odd and even replica pairs */
			if((k % 2) == (p % 2)){
				continue;
			}
			delta = (1.0f/s->T[k] - 1.0f/s->T[k-1]) * (s->exE[s->rts[k-1]] - s->exE[s->rts[k]]);
			double randme = gpu_rand01(&s->hpcgs, &s->hpcgi);
			if( delta < 0.0 || randme < exp(-delta) ){
				/* swap temperatures of replicas k-1 and k */
				swap(s, k-1, k);
				/* global counter */
				ex++;
				/* per-pair counter */
				s->ex[k] += 1.0f;
			}
			else{
				/* exchange rejected; nothing to do */
			}
		}
	}
	/* sync again */
	#pragma omp barrier
	return ex;
}

/* ptenergies(): compute the energy of each replica in [a, b) on this
 * thread's GPU, using per-replica reduction kernels, then copy the
 * results back into the host-side s->exE array used by exchange(). */
void ptenergies(setup_t *s, int tid, int a, int b){
	/* quick reset of the device reduction variables */
	kernel_reset<float><<< (b-a + BLOCKSIZE1D - 1)/BLOCKSIZE1D, BLOCKSIZE1D, 0, s->rstream[a] >>> (s->dE[tid], b-a, 0.0f);
	cudaDeviceSynchronize();
	cudaCheckErrors("kernel_reset dE");
	/* compute one energy reduction for each replica */
	for(int k = a; k < b; ++k){
		/* launch reduction kernel for k-th replica */
		redenergy(s, tid, a, b, k);
	}
	cudaDeviceSynchronize();
	cudaCheckErrors("kernel_redenergy");
	cudaMemcpy(s->exE + a, s->dE[tid], (b-a)*sizeof(float), cudaMemcpyDeviceToHost);
}

/* swap(): exchange the temperatures of replica slots a and b by updating
 * the replica-to-temperature (rts) and temperature-to-replica (trs) maps. */
void swap(setup_t *s, int a, int b ){
	int t1, t2, taux, raux;
	t1 = s->rts[a];
	t2 = s->rts[b];
	taux = s->trs[t1];
	raux = s->rts[a];
	/* swap rts */
	s->rts[a] = s->rts[b];
	s->rts[b] = raux;
	/* swap trs */
	s->trs[t1] = s->trs[t2];
	s->trs[t2] = taux;
}

/* measure phase: only active once the thermalization zone (s->mzone) is
 * passed; runs s->blocks measurement blocks of
 * reset -> simulate -> accumulate statistics. */
#ifdef MEASURE
void measure(setup_t *s, int tid, int a, int b, int p){
	if( p >= s->mzone ){
		for(int i=0; i< s->blocks; i++){
			reset_mcmc_statistics( s, tid, a, b);
			simulation(s, tid, a, b);
			accum_block_statistics( s, tid, a, b );
		}
	}
}
#endif
#endif
GB_unaryop__one_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_uint32_uint32 // op(A') function: GB_tran__one_uint32_uint32 // C type: uint32_t // A type: uint32_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_uint32_uint32 ( uint32_t *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_uint32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-14,16)),ceild(3*t1-30,32)),ceild(24*t2-Nz-115,128)),ceild(4*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(12*t1+Nx+15,128)),floord(24*t2+Nx+11,128)),floord(4*t3+Nx-9,128)),floord(24*t1-24*t2+Nz+Nx+13,128));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),32*t4+30);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 
* (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
hot.c
#include "hot.h"
#include "../../comms.h"
#include "../../profiler.h"
#include "../hot_data.h"
#include "../hot_interface.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Performs the CG solve, you always want to perform these steps, regardless
// of the context of the problem etc.
//
// Solves the implicit 2d heat diffusion system A.T = T_old with an
// unpreconditioned conjugate gradient iteration.  The 5-point band matrix A
// is never stored; it is rebuilt on the fly from the per-face conduction
// coefficients s_x/s_y.  On exit, `temperature` holds the new solution,
// *end_niters the number of inner iterations taken and *end_error the final
// squared residual.  `reduce_array` is unused here (kept for interface
// compatibility with other solver back ends).
void solve_diffusion_2d(const int nx, const int ny, Mesh* mesh,
                        const int max_inners, const double dt,
                        const double heat_capacity, const double conductivity,
                        double* temperature, double* r, double* p,
                        double* density, double* s_x, double* s_y, double* Ap,
                        int* end_niters, double* end_error,
                        double* reduce_array, const double* edgedx,
                        const double* edgedy) {
  // Store initial residual r = b - A.T and the conjugate vector p = r
  double local_old_r2 =
      initialise_cg(nx, ny, mesh->pad, dt, heat_capacity, conductivity, p, r,
                    temperature, density, s_x, s_y, edgedx, edgedy);
  double global_old_r2 = reduce_all_sum(local_old_r2);

  // Exchange halo regions so every rank sees consistent p and T
  START_PROFILING(&compute_profile);
  handle_boundary_2d(nx, ny, mesh, p, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, temperature, NO_INVERT, PACK);
  STOP_PROFILING(&compute_profile, "boundary");

  // TODO: Can one of the allreduces be removed with kernel fusion?
  int ii = 0;
  for (ii = 0; ii < max_inners; ++ii) {
    // alpha = r.r / p.A.p
    const double local_pAp = calculate_pAp(nx, ny, mesh->pad, s_x, s_y, p, Ap);
    const double global_pAp = reduce_all_sum(local_pAp);
    const double alpha = global_old_r2 / global_pAp;

    // T += alpha.p ; r -= alpha.A.p ; new_r2 = r.r
    const double local_new_r2 =
        calculate_new_r2(nx, ny, mesh->pad, alpha, temperature, p, r, Ap);
    const double global_new_r2 = reduce_all_sum(local_new_r2);
    const double beta = global_new_r2 / global_old_r2;

    START_PROFILING(&compute_profile);
    handle_boundary_2d(nx, ny, mesh, temperature, NO_INVERT, PACK);
    STOP_PROFILING(&compute_profile, "boundary");

    // Check if the solution has converged (r2 is a sum of squares, so the
    // fabs is defensive only)
    if (fabs(global_new_r2) < EPS) {
      global_old_r2 = global_new_r2;
      if (mesh->rank == MASTER) {
        printf("Successfully converged.\n");
      }
      break;
    }

    // p = r + beta.p
    update_conjugate(nx, ny, mesh->pad, beta, r, p);

    START_PROFILING(&compute_profile);
    handle_boundary_2d(nx, ny, mesh, p, NO_INVERT, PACK);
    STOP_PROFILING(&compute_profile, "boundary");

    // Store the old squared residual
    global_old_r2 = global_new_r2;
  }

  *end_niters = ii + 1;
  *end_error = global_old_r2;
}

// Initialises the CG solver: builds the per-face conduction coefficients
// s_x/s_y (harmonic-mean style averaging of the neighbouring densities,
// scaled by dt, conductivity and the cell widths), computes the initial
// residual r = T_old - A.T, seeds the conjugate direction p = r, and
// returns the local sum r.r.
// Scheme reference: https://inldigitallibrary.inl.gov/sti/3952796.pdf
double initialise_cg(const int nx, const int ny, const int pad,
                     const double dt, const double heat_capacity,
                     const double conductivity, double* p, double* r,
                     const double* temperature, const double* density,
                     double* s_x, double* s_y, const double* edgedx,
                     const double* edgedy) {
  START_PROFILING(&compute_profile);

  // Take the average of the coefficients at the cells surrounding
  // each face.  s_x is sized (nx+1) per row: one entry per x-face.
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      s_x[(ii) * (nx + 1) + (jj)] =
          (dt * conductivity *
           (density[(ii)*nx + (jj)] + density[(ii)*nx + (jj - 1)])) /
          (2.0 * density[(ii)*nx + (jj)] * density[(ii)*nx + (jj - 1)] *
           edgedx[jj] * edgedx[jj] * heat_capacity);
    }
  }
  // One entry per y-face, hence the (ny+1) row range
#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      s_y[(ii)*nx + (jj)] =
          (dt * conductivity *
           (density[(ii)*nx + (jj)] + density[(ii - 1) * nx + (jj)])) /
          (2.0 * density[(ii)*nx + (jj)] * density[(ii - 1) * nx + (jj)] *
           edgedy[ii] * edgedy[ii] * heat_capacity);
    }
  }

  // r = b - A.T, applying the 5-point stencil of A in place; p = r
  double initial_r2 = 0.0;
#pragma omp parallel for reduction(+ : initial_r2)
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      r[(ii)*nx + (jj)] =
          temperature[(ii)*nx + (jj)] -
          ((s_y[(ii)*nx + (jj)] + s_x[(ii) * (nx + 1) + (jj)] + 1.0 +
            s_x[(ii) * (nx + 1) + (jj + 1)] + s_y[(ii + 1) * nx + (jj)]) *
               temperature[(ii)*nx + (jj)] -
           s_y[(ii)*nx + (jj)] * temperature[(ii - 1) * nx + (jj)] -
           s_x[(ii) * (nx + 1) + (jj)] * temperature[(ii)*nx + (jj - 1)] -
           s_x[(ii) * (nx + 1) + (jj + 1)] * temperature[(ii)*nx + (jj + 1)] -
           s_y[(ii + 1) * nx + (jj)] * temperature[(ii + 1) * nx + (jj)]);
      p[(ii)*nx + (jj)] = r[(ii)*nx + (jj)];
      initial_r2 += r[(ii)*nx + (jj)] * r[(ii)*nx + (jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "initialise cg");
  return initial_r2;
}

// Calculates a value for alpha: computes Ap = A.p and returns the local
// dot product p.Ap.
// You don't need to use a matrix as the band matrix is fully predictable
// from the 5pt stencil.
double calculate_pAp(const int nx, const int ny, const int pad,
                     const double* s_x, const double* s_y, double* p,
                     double* Ap) {
  START_PROFILING(&compute_profile);

  double pAp = 0.0;
#pragma omp parallel for reduction(+ : pAp)
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      Ap[(ii)*nx + (jj)] =
          (s_y[(ii)*nx + (jj)] + s_x[(ii) * (nx + 1) + (jj)] + 1.0 +
           s_x[(ii) * (nx + 1) + (jj + 1)] + s_y[(ii + 1) * nx + (jj)]) *
              p[(ii)*nx + (jj)] -
          s_y[(ii)*nx + (jj)] * p[(ii - 1) * nx + (jj)] -
          s_x[(ii) * (nx + 1) + (jj)] * p[(ii)*nx + (jj - 1)] -
          s_x[(ii) * (nx + 1) + (jj + 1)] * p[(ii)*nx + (jj + 1)] -
          s_y[(ii + 1) * nx + (jj)] * p[(ii + 1) * nx + (jj)];
      pAp += p[(ii)*nx + (jj)] * Ap[(ii)*nx + (jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "calculate alpha");
  return pAp;
}

// Updates the current guess using the calculated alpha:
// T += alpha.p, r -= alpha.Ap; returns the local sum of the new r.r.
double calculate_new_r2(const int nx, const int ny, const int pad,
                        double alpha, double* temperature, double* p,
                        double* r, double* Ap) {
  START_PROFILING(&compute_profile);

  double new_r2 = 0.0;
#pragma omp parallel for reduction(+ : new_r2)
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      temperature[(ii)*nx + (jj)] += alpha * p[(ii)*nx + (jj)];
      r[(ii)*nx + (jj)] -= alpha * Ap[(ii)*nx + (jj)];
      new_r2 += r[(ii)*nx + (jj)] * r[(ii)*nx + (jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "calculate new r2");
  return new_r2;
}

// Updates the conjugate from the calculated beta and residual: p = r + beta.p
void update_conjugate(const int nx, const int ny, const int pad,
                      const double beta, const double* r, double* p) {
  START_PROFILING(&compute_profile);
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      p[(ii)*nx + (jj)] = r[(ii)*nx + (jj)] + beta * p[(ii)*nx + (jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "update conjugate");
}
GB_unop__ainv_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__ainv_uint64_uint64
// op(A') function:  GB_unop_tran__ainv_uint64_uint64

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = aij
// unaryop:  cij = -aij

// AINV is the additive inverse; for uint64_t the unary minus is the
// well-defined modular (two's-complement) negation, 2^64 - aij.

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint64_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = aij ;              \
    Cx [pC] = -z ;                  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__ainv_uint64_uint64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        uint64_t z = aij ;
        Cx [p] = -z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__ainv_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
estimate_dt_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela, Ruben Zorrilla // // #ifndef KRATOS_ESTIMATE_DT_UTILITIES_H #define KRATOS_ESTIMATE_DT_UTILITIES_H // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" #include "includes/element.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" #include "includes/serializer.h" #include "utilities/openmp_utils.h" #include "utilities/geometry_utilities.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Classes ///@{ /// Estimate the time step in a fluid problem to obtain a given Courant number. template< unsigned int TDim > class EstimateDtUtility { public: ///@name Life Cycle ///@{ /// Constructor /** * @param ModelPart The model part containing the problem mesh * @param CFL The user-defined Courant-Friedrichs-Lewy number * @param DtMin user-defined minimum time increment allowed * @param DtMax user-defined maximum time increment allowed */ EstimateDtUtility(ModelPart &ModelPart, const double CFL, const double DtMin, const double DtMax): mrModelPart(ModelPart) { mCFL = CFL; mDtMin = DtMin; mDtMax = DtMax; } /// Constructor with Kratos parameters /** * @param ModelPart The model part containing the problem mesh * @param rParameters Kratos parameters containing the CFL number and max time step */ EstimateDtUtility(ModelPart& ModelPart, Parameters& rParameters): mrModelPart(ModelPart) { Parameters defaultParameters(R"({ "automatic_time_step" : true, "CFL_number" : 1.0, "minimum_delta_time" : 1e-4, "maximum_delta_time" : 0.1 })"); rParameters.ValidateAndAssignDefaults(defaultParameters); mCFL = rParameters["CFL_number"].GetDouble(); mDtMin = rParameters["minimum_delta_time"].GetDouble(); mDtMax 
= rParameters["maximum_delta_time"].GetDouble(); } /// Destructor ~EstimateDtUtility() {} ///@} ///@name Operations ///@{ /// Set the CFL value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetCFL(const double CFL) { mCFL = CFL; } /// Set the maximum time step allowed value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetDtMin(const double DtMin) { mDtMin = DtMin; } /// Set the maximum time step allowed value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetDtMax(const double DtMax) { mDtMax = DtMax; } /// Calculate the maximum time step that satisfies the Courant-Friedrichs-Lewy (CFL) condition. /** * @return A time step value that satisfies the CFL condition for the current mesh and velocity field */ double EstimateDt() { KRATOS_TRY; unsigned int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfElements(),NumThreads,ElementPartition); double CurrentDt = mrModelPart.GetProcessInfo().GetValue(DELTA_TIME); std::vector<double> MaxCFL(NumThreads,0.0); #pragma omp parallel shared(MaxCFL) { int k = OpenMPUtils::ThisThread(); ModelPart::ElementIterator ElemBegin = mrModelPart.ElementsBegin() + ElementPartition[k]; ModelPart::ElementIterator ElemEnd = mrModelPart.ElementsBegin() + ElementPartition[k+1]; GeometryDataContainer GeometryInfo; double MaxLocalCFL = 0.0; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { double ElementCFL = CalculateElementCFL(*itElem,GeometryInfo,CurrentDt); if (ElementCFL > MaxLocalCFL) { MaxLocalCFL = ElementCFL; } } MaxCFL[k] = MaxLocalCFL; } // Reduce to maximum the thread results // Note that MSVC14 does not support max reductions, which are part of OpenMP 3.1 double CurrentCFL = MaxCFL[0]; for (unsigned int k = 1; k < NumThreads; k++) { if (CurrentCFL > 
MaxCFL[k]) CurrentCFL = MaxCFL[k]; } double NewDt = 0.0; // Avoid division by 0 when the maximum CFL number is close to 0 (e.g. problem initialization) if (CurrentCFL < 1e-10) { KRATOS_INFO("EstimateDtUtility") << "Setting minimum delta time " << mDtMin << " as current time step." << std::endl; NewDt = mDtMin; } else { // Compute new Dt NewDt = mCFL * CurrentDt / CurrentCFL; // Limit max and min Dt if (NewDt > mDtMax) { NewDt = mDtMax; } else if (NewDt < mDtMin) { NewDt = mDtMin; } } // Perform MPI sync if needed NewDt = mrModelPart.GetCommunicator().GetDataCommunicator().MinAll(NewDt); return NewDt; KRATOS_CATCH("") } /// Calculate each element's CFL for the current time step. /** * The elemental CFL is stored in the CFL_NUMBER elemental variable. * To view it in the post-process file, remember to print CFL_NUMBER as a Gauss Point result. */ void CalculateLocalCFL() { KRATOS_TRY; unsigned int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfElements(),NumThreads,ElementPartition); const double CurrentDt = mrModelPart.GetProcessInfo().GetValue(DELTA_TIME); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementIterator ElemBegin = mrModelPart.ElementsBegin() + ElementPartition[k]; ModelPart::ElementIterator ElemEnd = mrModelPart.ElementsBegin() + ElementPartition[k+1]; GeometryDataContainer GeometryInfo; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { double ElementCFL = CalculateElementCFL(*itElem,GeometryInfo,CurrentDt); itElem->SetValue(CFL_NUMBER,ElementCFL); } } KRATOS_CATCH("") } ///@} // Operators private: ///@name Auxiliary Data types ///@{ struct GeometryDataContainer { double Area; array_1d<double, TDim+1> N; BoundedMatrix<double, TDim+1, TDim> DN_DX; }; ///@} ///@name Member Variables ///@{ double mCFL; // User-defined CFL number double mDtMax; // User-defined maximum time increment allowed double mDtMin; // 
User-defined minimum time increment allowed ModelPart &mrModelPart; // The problem's model part ///@} // Member variables ///@name Private Operations ///@{ double CalculateElementCFL(Element &rElement, GeometryDataContainer& rGeometryInfo, double Dt) { double Proj = 0.0; // Get the element's geometric parameters Geometry< Node<3> >& rGeom = rElement.GetGeometry(); GeometryUtils::CalculateGeometryData(rGeom, rGeometryInfo.DN_DX, rGeometryInfo.N, rGeometryInfo.Area); // Elemental Velocity array_1d<double,3> ElementVel = rGeometryInfo.N[0]*rGeom[0].FastGetSolutionStepValue(VELOCITY); for (unsigned int i = 1; i < TDim+1; ++i) ElementVel += rGeometryInfo.N[i]*rGeom[i].FastGetSolutionStepValue(VELOCITY); // Calculate u/h as the maximum projection of the velocity along element heights for (unsigned int i = 0; i < TDim+1; ++i) { for (unsigned int d = 0; d < TDim; ++d) Proj += ElementVel[d]*rGeometryInfo.DN_DX(i,d); Proj = fabs(Proj); } return Proj*Dt; } ///@} // Private Operations }; ///@} // Kratos classes ///@} } // namespace Kratos. #endif /* KRATOS_ESTIMATE_DT_UTILITIES_H */
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. 
% % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o source_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'source_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'source_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the source_image. Typically used for repeated tiling % of the source_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for canvas preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. ** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... 
** gamma = Sa+Da-Sa*Da; ** gamma = 1 - QuantumScale*alpha * QuantumScale*beta; ** opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma ** ** The above SVG definitions also define that Mathematical Composition ** methods should use a 'Over' blending mode for Alpha Channel. ** It however was not applied for composition modes of 'Plus', 'Minus', ** the modulus versions of 'Add' and 'Subtract'. ** ** ** Mathematical operator changes to be applied from IM v6.7... ** ** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed ** 'ModulusAdd' and 'ModulusSubtract' for clarity. ** ** 2/ All mathematical compositions work as per the SVG specification ** with regard to blending. This now includes 'ModulusAdd' and ** 'ModulusSubtract'. ** ** 3/ When the special channel flag 'sync' (syncronize channel updates) ** is turned off (enabled by default) then mathematical compositions are ** only performed on the channels specified, and are applied ** independantally of each other. In other words the mathematics is ** performed as 'pure' mathematical operations, rather than as image ** operations. */ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa,const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */ composite->red=Atop(p->red,Sa,q->red,1.0); composite->green=Atop(p->green,Sa,q->green,1.0); composite->blue=Atop(p->blue,Sa,q->blue,1.0); if (q->colorspace == CMYKColorspace) composite->index=Atop(p->index,Sa,q->index,1.0); } /* What is this Composition method for? Can't find any specification! 
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { double SaSca; if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); SaSca=Sa*PerceptibleReciprocal(Sca); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif #if 0 /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static 
inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. 
*/ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity)); if ( (channel & RedChannel) != 0 ) composite->red=fabs((double) (p->red-q->red)); if ( (channel & GreenChannel) != 0 ) composite->green=fabs((double) (p->green-q->green)); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs((double) (p->blue-q->blue)); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs((double) (p->index-q->index)); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da*PerceptibleReciprocal(Dca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ((channel & AlphaChannel) != 0) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ((channel & RedChannel) != 0) composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0, QuantumScale*q->red,1.0); if ((channel & GreenChannel) != 0) composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0, QuantumScale*q->green,1.0); if ((channel & BlueChannel) != 0) composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0, QuantumScale*q->blue,1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0, QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca+Dca) < QuantumRange) return(0.0); else return(1.0); } static inline void CompositeHardMix(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardMix(p->red*Sa,q->red*Da); composite->green=gamma*HardMix(p->green*Sa,q->green*Da); composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardMix(p->index*Sa,q->index*Da); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,double *hue,double *chroma,double *luma) { double b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (double *) NULL); assert(chroma != (double *) NULL); assert(luma != (double *) NULL); r=(double) red; g=(double) green; b=(double) blue; max=MagickMax(r,MagickMax(g,b)); c=max-(double) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == (MagickRealType) max) h=fmod((g-b)/c+6.0,6.0); else if (green == (MagickRealType) max) h=((b-r)/c)+2.0; else if (blue == (MagickRealType) max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa, const MagickRealType magick_unused(q),const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*Da); } static inline void CompositeIn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*Da; composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*In(p->red,Sa,q->red,Da); composite->green=gamma*In(p->green,Sa,q->green,Da); 
composite->blue=gamma*In(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*In(p->index,Sa,q->index,Da); } static inline MagickRealType Lighten(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p > q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeLighten(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Lighten is also equvalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMin(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMax(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMax(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMax(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMax(p->index,q->index); } } static inline void CompositeLightenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. 
    Otherwise use Intensity only, but restrict copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;
    Da=1.0-QuantumScale*q->opacity;
    *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q))
      ? *p : *q;
  }
  else {
    /* from_p is non-zero when the source pixel is the more intense one */
    int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q));
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity = from_p ? p->opacity : q->opacity;
    if ( (channel & RedChannel) != 0 )
      composite->red = from_p ? p->red : q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green = from_p ? p->green : q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue = from_p ? p->blue : q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index = from_p ? p->index : q->index;
  }
}

#if 0
static inline MagickRealType LinearDodge(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearDodge: simplifies to a trivial formula
    f(Sc,Dc) = Sc + Dc
    Dca' = Sca + Dca
  */
  return(Sca+Dca);
}
#endif

static inline void CompositeLinearDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*(p->red*Sa+q->red*Da);
  composite->green=gamma*(p->green*Sa+q->green*Da);
  composite->blue=gamma*(p->blue*Sa+q->blue*Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*(p->index*Sa+q->index*Da);
}

static inline MagickRealType LinearBurn(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearBurn: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:
      f(Sc,Dc) = Sc + Dc - 1
  */
  return(Sca+Dca-Sa*Da);
}

static inline void CompositeLinearBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType LinearLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return(Dca+2*Sca-1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

      f(Sc,Dc) = Dc + 2*Sc - 1
  */
  return((Sca-Sa)*Da+Sca+Dca);
#endif
}

static inline void CompositeLinearLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...

      f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

    Where the arguments A,B,C,D are (currently) passed to composite as a
    comma separated 'geometry' string in "compose:args" image artifact.

      A = a->rho,  B = a->sigma,  C = a->xi,  D = a->psi

    Applying the SVG transparency formula (see above), we get...

      Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

      Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
        Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  const GeometryInfo *args, MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da,args);
    composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,
      QuantumScale*q->green*Da,Da,args);
    composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da,args);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,
        QuantumScale*q->index*Da,Da,args);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
  }
}

static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    /*
      NOTE: "Plus" does not use 'over' alpha-blending but uses a
      special 'plus' form of alpha-blending.  It is the ONLY mathematical
      operator to do this.  This is what makes it different to the
      otherwise equivalent "LinearDodge" composition method.

      Note however that color channels are still affected by the alpha
      channel as a result of the blending, making it just as useless for
      independent channel maths, just like all other mathematical
      composition methods.

      As such the removal of the 'sync' flag is still a useful convention.
      The MagickPixelCompositePlus() function is defined in
      "composite-private.h" so it can also be used for Image Blending.
    */
    MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=p->opacity+q->opacity-QuantumRange;
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red+q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green+q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue+q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index+q->index;
  }
}

static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination

      f(Sc,Dc) = Sc - Dc
  */
  magick_unreferenced(Da);

  return(Sca+Dca-2*Dca*Sa);
}

static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
    composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
    composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-(Sa-Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red-q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green-q->green;
    if ( (channel & BlueChannel) != 0 )
composite->blue=p->blue-q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=p->index-q->index; } } static inline MagickRealType ModulusAdd(const MagickRealType Sc, const MagickRealType Sa,const MagickRealType Dc,const MagickRealType Da) { if (((Sc*Sa)+(Dc*Da)) <= QuantumRange) return((Sc*Sa)+Dc*Da); return(((Sc*Sa)+Dc*Da)-QuantumRange); } static inline void CompositeModulusAdd(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=ModulusAdd(p->red,Sa,q->red,Da); composite->green=ModulusAdd(p->green,Sa,q->green,Da); composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity, 1.0,QuantumRange-q->opacity,1.0); if ( (channel & RedChannel) != 0 ) composite->red=ModulusAdd(p->red,1.0,q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=ModulusAdd(p->green,1.0,q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,1.0,q->index,1.0); } } static inline MagickRealType ModulusSubtract(const MagickRealType Sc, const MagickRealType Sa,const MagickRealType Dc,const MagickRealType Da) { if (((Sc*Sa)-(Dc*Da)) <= 0.0) return((Sc*Sa)-Dc*Da); return(((Sc*Sa)-Dc*Da)+QuantumRange); } static inline void 
CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma = RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
    composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
    composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
  }
}

/*
  SVG 'multiply': f(Sc,Dc) = Sc*Dc, with standard SVG alpha composition.
*/
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
    composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Sa*Da);
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumScale*p->red*q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumScale*p->green*q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumScale*p->blue*q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumScale*p->index*q->index;
  }
}

/*
  Porter-Duff 'Out': source is visible only where the canvas is transparent
  (Dca' = Sa*Sc*(1-Da)).
*/
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  magick_unreferenced(q);

  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the
    Softlight function, producing very similar results.

      f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);  /* fully transparent canvas: pass the source through */
  return(Dca*Dca*(Sa-2.0*Sca)*PerceptibleReciprocal(Da)+
    Sca*(2.0*Dca+1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,
      QuantumScale*q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ?
        2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen: A negated multiply

      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    Sa*=(MagickRealType) QuantumScale;
    Da*=(MagickRealType) QuantumScale; /* optimization */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Screen(p->red*Sa,q->red*Da);
    composite->green=gamma*Screen(p->green*Sa,q->green*Da);
    composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Screen(p->index*Sa,q->index*Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*Screen(QuantumScale*p->red,
        QuantumScale*q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*Screen(QuantumScale*p->green,
        QuantumScale*q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
        QuantumScale*q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*Screen(QuantumScale*p->index,
        QuantumScale*q->index);
  }
}

/*
  SVG 'soft-light': darken or lighten the canvas depending on the source
  value; alpha is the canvas color normalized by its alpha.
*/
static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  MagickRealType
    alpha,
    beta;

  alpha=Dca*PerceptibleReciprocal(Da);
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+
    Dca*(1.0-Sa);
  return(beta);
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated

  Multiply difference by amount, if difference larger than threshold???
  What use this is, is completely unknown.
  The Opacity calculation appears to be inverted.  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ?
        1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));  /* avoid division by zero */
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)*PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+
      Dca*(1.0-Sa));
  return(Dca*Sa*Sa*PerceptibleReciprocal(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
    (1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Porter-Duff 'Xor': keep the regions where exactly one of the two images
  is opaque.
*/
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/*
  CompositeImage() is a convenience wrapper that composites over the
  default channels; the full implementation is CompositeImageChannel().
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *source_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
    x_offset,y_offset);
  return(status);
}

MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    clip_to_self,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Prepare composite image.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); switch (compose) { case CopyCompositeOp: case CopyRedCompositeOp: case CopyGreenCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyMagentaCompositeOp: case CopyYellowCompositeOp: case CopyBlackCompositeOp: break; default: { if (IsGrayColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); break; } } (void) SetImageColorspace(source_image,image->colorspace); GetMagickPixelPacket(image,&zero); canvas_image=(Image *) NULL; amount=0.5; canvas_dissolve=1.0; clip_to_self=MagickTrue; percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* Modify canvas outside the overlaid region. 
*/ clip_to_self=MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (source_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const IndexPacket *source_indexes; register const PixelPacket *p; register IndexPacket *indexes; register PixelPacket *q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); (void) memcpy(q,p,source_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (source_indexes != (const IndexPacket *) NULL)) (void) memcpy(indexes,source_indexes, source_image->columns*sizeof(*indexes)); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); 
return(status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); clip_to_self=MagickFalse; break; } case BlurCompositeOp: { CacheView *canvas_view, *source_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. 
*/ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter,1.0); /* do the variable blurring of each pixel in image */ pixel=zero; source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict r; register IndexPacket *magick_restrict canvas_indexes; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(p), blur.y1*QuantumScale*GetPixelGreen(p), blur.x2*QuantumScale*GetPixelRed(p), blur.y2*QuantumScale*GetPixelGreen(p)); (void) ResamplePixelColor(resample_filter,(double) x_offset+x,(double) y_offset+y,&pixel); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } 
resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view, *source_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *magick_restrict canvas_indexes; register PixelPacket *magick_restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=((MagickRealType) image->columns-1)/2.0; else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+geometry_info.xi); else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=((MagickRealType) image->rows-1)/2.0; else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. */ offset.x=(double) ((horizontal_scale*(GetPixelRed(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0)); offset.y=(double) ((vertical_scale*(GetPixelGreen(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0)); status=InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p))); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. 
*/ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0 ) { canvas_dissolve=1.0; clip_to_self=MagickTrue; } } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0) clip_to_self=MagickTrue; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. 
This Composition method is deprecated */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(image,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; value=GetImageArtifact(image,"compose:clip-to-self"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) != MagickFalse ? MagickTrue : MagickFalse; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsMagickTrue(value); /* Composite image. */ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateCompositeImage(image,channel,compose,source_image, x_offset,y_offset,canvas_dissolve,source_dissolve,exception); if (status != MagickFalse) return(status); #endif status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(source_image,&zero); source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay 
region. */ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); source_indexes=GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image,&source); GetMagickPixelPacket(image,&canvas); hue=0.0; chroma=0.0; luma=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } canvas.red=(MagickRealType) GetPixelRed(q); canvas.green=(MagickRealType) GetPixelGreen(q); canvas.blue=(MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity=(MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index=(MagickRealType) GetPixelIndex(indexes+x); if (image->colorspace == CMYKColorspace) { canvas.red=(MagickRealType) QuantumRange-canvas.red; canvas.green=(MagickRealType) QuantumRange-canvas.green; canvas.blue=(MagickRealType) QuantumRange-canvas.blue; canvas.index=(MagickRealType) QuantumRange-canvas.index; } /* Handle canvas modifications outside overlaid region. 
*/ composite=canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve* (QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(source_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto canvas. 
*/ source.red=(MagickRealType) GetPixelRed(p); source.green=(MagickRealType) GetPixelGreen(p); source.blue=(MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity=(MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetPixelIndex(source_indexes+ x-x_offset); if (source_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&canvas, canvas.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas,canvas.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&canvas,&composite); break; } case DstInCompositeOp: { CompositeIn(&canvas,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&canvas,&composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&canvas,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas,&source,&composite); break; } case XorCompositeOp: { CompositeXor(&source,&canvas,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source,&canvas,channel,&composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source,&canvas,channel,&composite); break; } case MinusSrcCompositeOp: { 
CompositeMinus(&canvas,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&canvas,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&canvas,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&canvas,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&canvas,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&canvas,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&canvas,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&canvas,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&canvas,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&canvas,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&canvas,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&canvas,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&canvas,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&canvas,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&canvas,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&canvas,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&canvas,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&canvas,&composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source,&canvas,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. 
*/ CompositeHardLight(&canvas,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&canvas,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&canvas,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&canvas,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&canvas,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&canvas,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&canvas) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&canvas,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&canvas, (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange- canvas.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&canvas, canvas_dissolve,&composite); break; } case StereoCompositeOp: { composite.red=(MagickRealType) GetPixelRed(p); composite.opacity=(composite.opacity+canvas.opacity/2); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&canvas,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red,&composite.green, &composite.blue); break; } case HueCompositeOp: { if (source.opacity == 
TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans, &sans,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { 
composite.blue=source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); else composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels+source_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. 
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    Tiling writes pixels directly, so the canvas must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Work on a private clone of the texture so the caller's image is not
    modified by the colorspace transform below.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  /*
    Tile virtual pixels make out-of-bounds texture reads wrap around.
  */
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        Tile texture onto the image background: general (slow) path, taken
        whenever the compose operator actually blends -- anything other than
        a plain copy, or an Over where either image has an alpha channel.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
          texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): the compose
    operator reduces to a straight pixel copy, so each canvas row can be
    filled with memcpy's of a single texture row.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,texture_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the texture row this canvas row maps onto: the modulo wraps the
      tile vertically, while the tile virtual-pixel method set above wraps
      horizontal reads past the texture width.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
      texture_image->columns)
    {
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;  /* clip the final partial tile */
      (void) memcpy(q,p,width*sizeof(*p));
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          /* CMYK stores the black channel in the index queue; copy it too. */
          (void) memcpy(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
c-decl.c
/* Process declarations and variables for C compiler. Copyright (C) 1988-2020 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Process declarations and symbol lookup for C front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #define INCLUDE_UNIQUE_PTR #include "system.h" #include "coretypes.h" #include "target.h" #include "function.h" #include "c-tree.h" #include "timevar.h" #include "stringpool.h" #include "cgraph.h" #include "intl.h" #include "print-tree.h" #include "stor-layout.h" #include "varasm.h" #include "attribs.h" #include "toplev.h" #include "debug.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "c-family/c-ubsan.h" #include "c-lang.h" #include "langhooks.h" #include "tree-iterator.h" #include "dumpfile.h" #include "plugin.h" #include "c-family/c-ada-spec.h" #include "builtins.h" #include "spellcheck-tree.h" #include "gcc-rich-location.h" #include "asan.h" #include "c-family/name-hint.h" #include "c-family/known-headers.h" #include "c-family/c-spellcheck.h" /* In grokdeclarator, distinguish syntactic contexts of declarators. 
 */

/* In grokdeclarator, the syntactic context of the declarator being
   processed; selects which checks and defaults apply.  */
enum decl_context
{ NORMAL,			/* Ordinary declaration */
  FUNCDEF,			/* Function definition */
  PARM,				/* Declaration of parm before function body */
  FIELD,			/* Declaration inside struct or union */
  TYPENAME};			/* Typename (inside cast or sizeof) */

/* States indicating how grokdeclarator() should handle declspecs marked
   with __attribute__((deprecated)).  An object declared as
   __attribute__((deprecated)) suppresses warnings of uses of other
   deprecated items.  */

enum deprecated_states {
  DEPRECATED_NORMAL,
  DEPRECATED_SUPPRESS
};

/* Nonzero if we have seen an invalid cross reference
   to a struct, union, or enum, but not yet printed the message.  */

tree pending_invalid_xref;

/* File and line to appear in the eventual error message.  */

location_t pending_invalid_xref_location;

/* The file and line that the prototype came from if this is an
   old-style definition; used for diagnostics in
   store_parm_decls_oldstyle.  */

static location_t current_function_prototype_locus;

/* Whether this prototype was built-in.  */

static bool current_function_prototype_built_in;

/* The argument type information of this prototype.  */

static tree current_function_prototype_arg_types;

/* The argument information structure for the function currently being
   defined.  */

static struct c_arg_info *current_function_arg_info;

/* The obstack on which parser and related data structures, which are
   not live beyond their top-level declaration or definition, are
   allocated.  */

struct obstack parser_obstack;

/* The current statement tree.  */

static GTY(()) struct stmt_tree_s c_stmt_tree;

/* State saving variables.  */

tree c_break_label;
tree c_cont_label;

/* A list of decls to be made automatically visible in each file scope.  */

static GTY(()) tree visible_builtins;

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */

int current_function_returns_value;

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */

int current_function_returns_null;

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */

int current_function_returns_abnormally;

/* Set to nonzero by `grokdeclarator' for a function
   whose return type is defaulted, if warnings for this are desired.  */

static int warn_about_return_type;

/* Nonzero when the current toplevel function contains a declaration
   of a nested function which is never defined.  */

static bool undef_nested_function;

/* If non-zero, implicit "omp declare target" attribute is added into the
   attribute lists.  */

int current_omp_declare_target_attribute;

/* Each c_binding structure describes one binding of an identifier to
   a decl.  All the decls in a scope - irrespective of namespace - are
   chained together by the ->prev field, which (as the name implies)
   runs in reverse order.  All the decls in a given namespace bound to
   a given identifier are chained by the ->shadowed field, which runs
   from inner to outer scopes.

   The ->decl field usually points to a DECL node, but there are two
   exceptions.  In the namespace of type tags, the bound entity is a
   RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node.  If an undeclared
   identifier is encountered, it is bound to error_mark_node to
   suppress further errors about that identifier in the current
   function.

   The ->u.type field stores the type of the declaration in this scope;
   if NULL, the type is the type of the ->decl field.  This is only of
   relevance for objects with external or internal linkage which may
   be redeclared in inner scopes, forming composite types that only
   persist for the duration of those scopes.  In the external scope,
   this stores the composite of all the types declared for this
   object, visible or not.

   The ->inner_comp field (used only at file scope) stores whether an
   incomplete array type at file scope was completed at an inner scope
   to an array size other than 1.

   The ->u.label field is used for labels.  It points to a structure
   which stores additional information used for warnings.

   The depth field is copied from the scope structure that holds this
   decl.  It is used to preserve the proper ordering of the ->shadowed
   field (see bind()) and also for a handful of special-case checks.
   Finally, the invisible bit is true for a decl which should be
   ignored for purposes of normal name lookup, and the nested bit is
   true for a decl that's been bound a second time in an
   inner scope; in all such cases, the binding in the outer scope will
   have its invisible bit true.  */

struct GTY((chain_next ("%h.prev"))) c_binding {
  union GTY(()) {		/* first so GTY desc can use decl */
    tree GTY((tag ("0"))) type; /* the type in this scope */
    struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */
  } GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u;
  tree decl;			/* the decl bound */
  tree id;			/* the identifier it's bound to */
  struct c_binding *prev;	/* the previous decl in this scope */
  struct c_binding *shadowed;	/* the innermost decl shadowed by this one */
  unsigned int depth : 28;	/* depth of this scope */
  BOOL_BITFIELD invisible : 1;	/* normal lookup should ignore this binding */
  BOOL_BITFIELD nested : 1;	/* do not set DECL_CONTEXT when popping */
  BOOL_BITFIELD inner_comp : 1;	/* incomplete array completed in inner scope */
  BOOL_BITFIELD in_struct : 1;	/* currently defined as struct field */
  location_t locus;		/* location for nested bindings */
};

/* Scope-membership tests on a binding; the depth field mirrors the
   owning scope's depth, with 0 reserved for the external scope and 1
   for the file scope.  */
#define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth)
#define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth)
#define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/)
#define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/)

/* Each C symbol points to three linked lists of c_binding structures.
These describe the values of the identifier in the three different namespaces defined by the language. */ struct GTY(()) lang_identifier { struct c_common_identifier common_id; struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */ struct c_binding *tag_binding; /* struct/union/enum tags */ struct c_binding *label_binding; /* labels */ }; /* Validate c-lang.c's assumptions. */ extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate [(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1]; /* The binding oracle; see c-tree.h. */ void (*c_binding_oracle) (enum c_oracle_request, tree identifier); /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's symbol binding. */ #define I_SYMBOL_CHECKED(node) \ (TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding* * i_symbol_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->symbol_binding == NULL && c_binding_oracle != NULL && !I_SYMBOL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_SYMBOL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_SYMBOL, node); } return &lid->symbol_binding; } #define I_SYMBOL_BINDING(node) (*i_symbol_binding (node)) #define I_SYMBOL_DECL(node) \ (I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's tag binding. */ #define I_TAG_CHECKED(node) \ (TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_tag_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->tag_binding == NULL && c_binding_oracle != NULL && !I_TAG_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. 
*/ I_TAG_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_TAG, node); } return &lid->tag_binding; } #define I_TAG_BINDING(node) (*i_tag_binding (node)) #define I_TAG_DECL(node) \ (I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's label binding. */ #define I_LABEL_CHECKED(node) \ (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_label_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->label_binding == NULL && c_binding_oracle != NULL && !I_LABEL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_LABEL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_LABEL, node); } return &lid->label_binding; } #define I_LABEL_BINDING(node) (*i_label_binding (node)) #define I_LABEL_DECL(node) \ (I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0) /* The resulting tree type. */ union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("0"), desc ("tree_node_structure (&%h)"))) generic; struct lang_identifier GTY ((tag ("1"))) identifier; }; /* Track bindings and other things that matter for goto warnings. For efficiency, we do not gather all the decls at the point of definition. Instead, we point into the bindings structure. As scopes are popped, we update these structures and gather the decls that matter at that time. */ struct GTY(()) c_spot_bindings { /* The currently open scope which holds bindings defined when the label was defined or the goto statement was found. */ struct c_scope *scope; /* The bindings in the scope field which were defined at the point of the label or goto. This lets us look at older or newer bindings in the scope, as appropriate. 
*/ struct c_binding *bindings_in_scope; /* The number of statement expressions that have started since this label or goto statement was defined. This is zero if we are at the same statement expression level. It is positive if we are in a statement expression started since this spot. It is negative if this spot was in a statement expression and we have left it. */ int stmt_exprs; /* Whether we started in a statement expression but are no longer in it. This is set to true if stmt_exprs ever goes negative. */ bool left_stmt_expr; }; /* This structure is used to keep track of bindings seen when a goto statement is defined. This is only used if we see the goto statement before we see the label. */ struct GTY(()) c_goto_bindings { /* The location of the goto statement. */ location_t loc; /* The bindings of the goto statement. */ struct c_spot_bindings goto_bindings; }; typedef struct c_goto_bindings *c_goto_bindings_p; /* The additional information we keep track of for a label binding. These fields are updated as scopes are popped. */ struct GTY(()) c_label_vars { /* The shadowed c_label_vars, when one label shadows another (which can only happen using a __label__ declaration). */ struct c_label_vars *shadowed; /* The bindings when the label was defined. */ struct c_spot_bindings label_bindings; /* A list of decls that we care about: decls about which we should warn if a goto branches to this label from later in the function. Decls are added to this list as scopes are popped. We only add the decls that matter. */ vec<tree, va_gc> *decls_in_scope; /* A list of goto statements to this label. This is only used for goto statements seen before the label was defined, so that we can issue appropriate warnings for them. */ vec<c_goto_bindings_p, va_gc> *gotos; }; /* Each c_scope structure describes the complete contents of one scope. 
Four scopes are distinguished specially: the innermost or current scope, the innermost function scope, the file scope (always the second to outermost) and the outermost or external scope. Most declarations are recorded in the current scope. All normal label declarations are recorded in the innermost function scope, as are bindings of undeclared identifiers to error_mark_node. (GCC permits nested functions as an extension, hence the 'innermost' qualifier.) Explicitly declared labels (using the __label__ extension) appear in the current scope. Being in the file scope (current_scope == file_scope) causes special behavior in several places below. Also, under some conditions the Objective-C front end records declarations in the file scope even though that isn't the current scope. All declarations with external linkage are recorded in the external scope, even if they aren't visible there; this models the fact that such declarations are visible to the entire program, and (with a bit of cleverness, see pushdecl) allows diagnosis of some violations of C99 6.2.2p7 and 6.2.7p2: If, within the same translation unit, the same identifier appears with both internal and external linkage, the behavior is undefined. All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined. Initially only the built-in declarations, which describe compiler intrinsic functions plus a subset of the standard library, are in this scope. The order of the blocks list matters, and it is frequently appended to. To avoid having to walk all the way to the end of the list on each insertion, or reverse the list later, we maintain a pointer to the last list entry. (FIXME: It should be feasible to use a reversed list here.) The bindings list is strictly in reverse order of declarations; pop_scope relies on this. */ struct GTY((chain_next ("%h.outer"))) c_scope { /* The scope containing this one. 
*/ struct c_scope *outer; /* The next outermost function scope. */ struct c_scope *outer_function; /* All bindings in this scope. */ struct c_binding *bindings; /* For each scope (except the global one), a chain of BLOCK nodes for all the scopes that were entered and exited one level down. */ tree blocks; tree blocks_last; /* The depth of this scope. Used to keep the ->shadowed chain of bindings sorted innermost to outermost. */ unsigned int depth : 28; /* True if we are currently filling this scope with parameter declarations. */ BOOL_BITFIELD parm_flag : 1; /* True if we saw [*] in this scope. Used to give an error messages if these appears in a function definition. */ BOOL_BITFIELD had_vla_unspec : 1; /* True if we already complained about forward parameter decls in this scope. This prevents double warnings on foo (int a; int b; ...) */ BOOL_BITFIELD warned_forward_parm_decls : 1; /* True if this is the outermost block scope of a function body. This scope contains the parameters, the local variables declared in the outermost block, and all the labels (except those in nested functions, or declared at block scope with __label__). */ BOOL_BITFIELD function_body : 1; /* True means make a BLOCK for this scope no matter what. */ BOOL_BITFIELD keep : 1; /* True means that an unsuffixed float constant is _Decimal64. */ BOOL_BITFIELD float_const_decimal64 : 1; /* True if this scope has any label bindings. This is used to speed up searching for labels when popping scopes, particularly since labels are normally only found at function scope. */ BOOL_BITFIELD has_label_bindings : 1; /* True if we should issue a warning if a goto statement crosses any of the bindings. We still need to check the list of bindings to find the specific ones we need to warn about. This is true if decl_jump_unsafe would return true for any of the bindings. This is used to avoid looping over all the bindings unnecessarily. 
*/ BOOL_BITFIELD has_jump_unsafe_decl : 1; }; /* The scope currently in effect. */ static GTY(()) struct c_scope *current_scope; /* The innermost function scope. Ordinary (not explicitly declared) labels, bindings to error_mark_node, and the lazily-created bindings of __func__ and its friends get this scope. */ static GTY(()) struct c_scope *current_function_scope; /* The C file scope. This is reset for each input translation unit. */ static GTY(()) struct c_scope *file_scope; /* The outermost scope. This is used for all declarations with external linkage, and only these, hence the name. */ static GTY(()) struct c_scope *external_scope; /* A chain of c_scope structures awaiting reuse. */ static GTY((deletable)) struct c_scope *scope_freelist; /* A chain of c_binding structures awaiting reuse. */ static GTY((deletable)) struct c_binding *binding_freelist; /* Append VAR to LIST in scope SCOPE. */ #define SCOPE_LIST_APPEND(scope, list, decl) do { \ struct c_scope *s_ = (scope); \ tree d_ = (decl); \ if (s_->list##_last) \ BLOCK_CHAIN (s_->list##_last) = d_; \ else \ s_->list = d_; \ s_->list##_last = d_; \ } while (0) /* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */ #define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \ struct c_scope *t_ = (tscope); \ struct c_scope *f_ = (fscope); \ if (t_->to##_last) \ BLOCK_CHAIN (t_->to##_last) = f_->from; \ else \ t_->to = f_->from; \ t_->to##_last = f_->from##_last; \ } while (0) /* A c_inline_static structure stores details of a static identifier referenced in a definition of a function that may be an inline definition if no subsequent declaration of that function uses "extern" or does not use "inline". */ struct GTY((chain_next ("%h.next"))) c_inline_static { /* The location for a diagnostic. */ location_t location; /* The function that may be an inline definition. */ tree function; /* The object or function referenced. */ tree static_decl; /* What sort of reference this is. 
*/ enum c_inline_static_type type; /* The next such structure or NULL. */ struct c_inline_static *next; }; /* List of static identifiers used or referenced in functions that may be inline definitions. */ static GTY(()) struct c_inline_static *c_inline_statics; /* True means unconditionally make a BLOCK for the next scope pushed. */ static bool keep_next_level_flag; /* True means the next call to push_scope will be the outermost scope of a function body, so do not push a new scope, merely cease expecting parameter decls. */ static bool next_is_function_body; /* A vector of pointers to c_binding structures. */ typedef struct c_binding *c_binding_ptr; /* Information that we keep for a struct or union while it is being parsed. */ class c_struct_parse_info { public: /* If warn_cxx_compat, a list of types defined within this struct. */ auto_vec<tree> struct_types; /* If warn_cxx_compat, a list of field names which have bindings, and which are defined in this struct, but which are not defined in any enclosing struct. This is used to clear the in_struct field of the c_bindings structure. */ auto_vec<c_binding_ptr> fields; /* If warn_cxx_compat, a list of typedef names used when defining fields in this struct. */ auto_vec<tree> typedefs_seen; }; /* Information for the struct or union currently being parsed, or NULL if not parsing a struct or union. */ static class c_struct_parse_info *struct_parse_info; /* Forward declarations. */ static tree lookup_name_in_scope (tree, struct c_scope *); static tree c_make_fname_decl (location_t, tree, int); static tree grokdeclarator (const struct c_declarator *, struct c_declspecs *, enum decl_context, bool, tree *, tree *, tree *, bool *, enum deprecated_states); static tree grokparms (struct c_arg_info *, bool); static void layout_array_type (tree); static void warn_defaults_to (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); static const char *header_for_builtin_fn (tree); /* T is a statement. 
Add it to the statement-tree.  This is the C/ObjC version--C++ has a
   slightly different version of this function.  */

tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  /* Give the statement a source location if it does not already have
     one (labels keep their own location handling).  */
  if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR)
    {
      if (!EXPR_HAS_LOCATION (t))
	SET_EXPR_LOCATION (t, input_location);
    }

  if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Add T to the statement-tree.  Non-side-effect statements need to be
     recorded during statement expressions.  */
  if (!building_stmt_list_p ())
    push_stmt_list ();
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}

/* Build a pointer type to TO_TYPE using the default pointer mode.
   The target's pointer mode for the address space is used except when
   the generic address space has an overriding c_default_pointer_mode.  */

static tree
c_build_pointer_type (tree to_type)
{
  addr_space_t as = to_type == error_mark_node? ADDR_SPACE_GENERIC
					      : TYPE_ADDR_SPACE (to_type);
  machine_mode pointer_mode;

  if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode)
    pointer_mode = targetm.addr_space.pointer_mode (as);
  else
    pointer_mode = c_default_pointer_mode;
  return build_pointer_type_for_mode (to_type, pointer_mode, false);
}

/* Return true if we will want to say something if a goto statement
   crosses DECL.  */

static bool
decl_jump_unsafe (tree decl)
{
  if (error_operand_p (decl))
    return false;

  /* Don't warn for compound literals.  If a goto statement crosses
     their initialization, it should cross also all the places where
     the complit is used or where the complit address might be saved
     into some variable, so code after the label to which goto jumps
     should not be able to refer to the compound literal.  */
  if (VAR_P (decl) && C_DECL_COMPOUND_LITERAL_P (decl))
    return false;

  /* Always warn about crossing variably modified types.  */
  if ((VAR_P (decl) || TREE_CODE (decl) == TYPE_DECL)
      && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
    return true;

  /* Otherwise, only warn if -Wjump-misses-init and this is an
     initialized automatic decl.  */
  if (warn_jump_misses_init
      && VAR_P (decl)
      && !TREE_STATIC (decl)
      && DECL_INITIAL (decl) != NULL_TREE)
    return true;

  return false;
}

/* Dump the three namespace bindings (symbol, tag, label) of NODE to
   FILE at indentation INDENT, for debugging.  */

void
c_print_identifier (FILE *file, tree node, int indent)
{
  void (*save) (enum c_oracle_request, tree identifier);

  /* Temporarily hide any binding oracle.  Without this, calls to
     debug_tree from the debugger will end up calling into the oracle,
     making for a confusing debug session.  As the oracle isn't needed
     here for normal operation, it's simplest to suppress it.  */
  save = c_binding_oracle;
  c_binding_oracle = NULL;

  print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4);
  print_node (file, "tag", I_TAG_DECL (node), indent + 4);
  print_node (file, "label", I_LABEL_DECL (node), indent + 4);
  if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN)
    {
      tree rid = ridpointers[C_RID_CODE (node)];
      indent_to (file, indent + 4);
      fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"",
	       (void *) rid, IDENTIFIER_POINTER (rid));
    }
  c_binding_oracle = save;
}

/* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL,
   which may be any of several kinds of DECL or TYPE or
   error_mark_node, in the scope SCOPE.
*/
static void
bind (tree name, tree decl, struct c_scope *scope, bool invisible,
      bool nested, location_t locus)
{
  struct c_binding *b, **here;

  /* Recycle a binding structure from the freelist if one is available,
     otherwise GC-allocate a fresh one.  */
  if (binding_freelist)
    {
      b = binding_freelist;
      binding_freelist = b->prev;
    }
  else
    b = ggc_alloc<c_binding> ();

  b->shadowed = 0;
  b->decl = decl;
  b->id = name;
  b->depth = scope->depth;
  b->invisible = invisible;
  b->nested = nested;
  b->inner_comp = 0;
  b->in_struct = 0;
  b->locus = locus;

  b->u.type = NULL;

  /* Thread the new binding onto the head of the scope's (reverse-order)
     binding chain.  */
  b->prev = scope->bindings;
  scope->bindings = b;

  if (decl_jump_unsafe (decl))
    scope->has_jump_unsafe_decl = 1;

  /* Anonymous bindings (e.g. unnamed tagged types) never participate
     in identifier lookup, so there is no shadow chain to update.  */
  if (!name)
    return;

  /* Select the per-identifier namespace chain (label, tag, or ordinary
     symbol) appropriate to the kind of declaration.  */
  switch (TREE_CODE (decl))
    {
    case LABEL_DECL:
      here = &I_LABEL_BINDING (name);
      break;
    case ENUMERAL_TYPE:
    case UNION_TYPE:
    case RECORD_TYPE:
      here = &I_TAG_BINDING (name);
      break;
    case VAR_DECL:
    case FUNCTION_DECL:
    case TYPE_DECL:
    case CONST_DECL:
    case PARM_DECL:
    case ERROR_MARK:
      here = &I_SYMBOL_BINDING (name);
      break;
    default:
      gcc_unreachable ();
    }

  /* Locate the appropriate place in the chain of shadowed decls
     to insert this binding.  Normally, scope == current_scope and
     this does nothing.  The ->shadowed chain runs inner to outer,
     sorted by ->depth (see the c_binding commentary above).  */
  while (*here && (*here)->depth > scope->depth)
    here = &(*here)->shadowed;

  b->shadowed = *here;
  *here = b;
}

/* Clear the binding structure B, stick it on the binding_freelist,
   and return the former value of b->prev.  This is used by pop_scope
   and get_parm_info to iterate destructively over all the bindings
   from a given scope.  */

static struct c_binding *
free_binding_and_advance (struct c_binding *b)
{
  struct c_binding *prev = b->prev;

  memset (b, 0, sizeof (struct c_binding));
  b->prev = binding_freelist;
  binding_freelist = b;

  return prev;
}

/* Bind a label.  Like bind, but skip fields which aren't used for
   labels, and add the LABEL_VARS value.
*/
static void
bind_label (tree name, tree label, struct c_scope *scope,
	    struct c_label_vars *label_vars)
{
  struct c_binding *b;

  bind (name, label, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);

  scope->has_label_bindings = true;

  /* bind() pushed the new binding onto the head of the scope's chain;
     attach LABEL_VARS there, saving any label it shadows.  */
  b = scope->bindings;
  gcc_assert (b->decl == label);
  label_vars->shadowed = b->u.label;
  b->u.label = label_vars;
}

/* Hook called at end of compilation to assume 1 elt
   for a file-scope tentative array defn that wasn't complete before.  */

void
c_finish_incomplete_decl (tree decl)
{
  if (VAR_P (decl))
    {
      tree type = TREE_TYPE (decl);
      if (type != error_mark_node
	  && TREE_CODE (type) == ARRAY_TYPE
	  && !DECL_EXTERNAL (decl)
	  && TYPE_DOMAIN (type) == NULL_TREE)
	{
	  warning_at (DECL_SOURCE_LOCATION (decl),
		      0, "array %q+D assumed to have one element", decl);

	  complete_array_type (&TREE_TYPE (decl), NULL_TREE, true);

	  relayout_decl (decl);
	}
    }
}

/* Record that inline function FUNC contains a reference (location
   LOC) to static DECL (file-scope or function-local according to
   TYPE).  The record is checked later by check_inline_statics.  */

void
record_inline_static (location_t loc, tree func, tree decl,
		      enum c_inline_static_type type)
{
  c_inline_static *csi = ggc_alloc<c_inline_static> ();
  csi->location = loc;
  csi->function = func;
  csi->static_decl = decl;
  csi->type = type;
  /* Push onto the global list; the whole list is walked at end of
     translation unit.  */
  csi->next = c_inline_statics;
  c_inline_statics = csi;
}

/* Check for references to static declarations in inline functions at
   the end of the translation unit and diagnose them if the functions
   are still inline definitions.
*/ static void check_inline_statics (void) { struct c_inline_static *csi; for (csi = c_inline_statics; csi; csi = csi->next) { if (DECL_EXTERNAL (csi->function)) switch (csi->type) { case csi_internal: pedwarn (csi->location, 0, "%qD is static but used in inline function %qD " "which is not static", csi->static_decl, csi->function); break; case csi_modifiable: pedwarn (csi->location, 0, "%q+D is static but declared in inline function %qD " "which is not static", csi->static_decl, csi->function); break; default: gcc_unreachable (); } } c_inline_statics = NULL; } /* Fill in a c_spot_bindings structure. If DEFINING is true, set it for the current state, otherwise set it to uninitialized. */ static void set_spot_bindings (struct c_spot_bindings *p, bool defining) { if (defining) { p->scope = current_scope; p->bindings_in_scope = current_scope->bindings; } else { p->scope = NULL; p->bindings_in_scope = NULL; } p->stmt_exprs = 0; p->left_stmt_expr = false; } /* Update spot bindings P as we pop out of SCOPE. Return true if we should push decls for a label. */ static bool update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p) { if (p->scope != scope) { /* This label or goto is defined in some other scope, or it is a label which is not yet defined. There is nothing to update. */ return false; } /* Adjust the spot bindings to refer to the bindings already defined in the enclosing scope. */ p->scope = scope->outer; p->bindings_in_scope = p->scope->bindings; return true; } /* The Objective-C front-end often needs to determine the current scope. */ void * objc_get_current_scope (void) { return current_scope; } /* The following function is used only by Objective-C. It needs to live here because it accesses the innards of c_scope. 
*/
void
objc_mark_locals_volatile (void *enclosing_blk)
{
  struct c_scope *scope;
  struct c_binding *b;

  for (scope = current_scope;
       scope && scope != enclosing_blk;
       scope = scope->outer)
    {
      for (b = scope->bindings; b; b = b->prev)
	objc_volatilize_decl (b->decl);

      /* Do not climb up past the current function.  */
      if (scope->function_body)
	break;
    }
}

/* Return true if we are in the global binding level.  */

bool
global_bindings_p (void)
{
  return current_scope == file_scope;
}

/* Return true if we're declaring parameters in an old-style function
   declaration.  */

bool
old_style_parameter_scope (void)
{
  /* If processing parameters and there is no function statement list, we
   * have an old-style function declaration.  */
  return (current_scope->parm_flag && !DECL_SAVED_TREE (current_function_decl));
}

/* Arrange for the next scope pushed to get a BLOCK node even if it
   would not otherwise warrant one.  */

void
keep_next_level (void)
{
  keep_next_level_flag = true;
}

/* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON.  */

void
set_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = true;
}

/* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma.  */

void
clear_float_const_decimal64 (void)
{
  current_scope->float_const_decimal64 = false;
}

/* Return nonzero if an unsuffixed float constant is _Decimal64.  */

bool
float_const_decimal64_p (void)
{
  return current_scope->float_const_decimal64;
}

/* Identify this scope as currently being filled with parameters.  */

void
declare_parm_level (void)
{
  current_scope->parm_flag = true;
}

/* Enter a new scope, either by pushing a fresh c_scope structure or,
   at the parameters-to-function-body transition, by converting the
   parameter scope in place.  */

void
push_scope (void)
{
  if (next_is_function_body)
    {
      /* This is the transition from the parameters to the top level
	 of the function body.  These are the same scope (C99 6.2.1p4,6)
	 so we do not push another scope structure.  next_is_function_body
	 is set only by store_parm_decls, which in turn is called when
	 and only when we are about to encounter the opening curly brace
	 for the function body.

	 The outermost block of a function always gets a BLOCK node,
	 because the debugging output routines expect that each
	 function has at least one BLOCK.  */
      current_scope->parm_flag         = false;
      current_scope->function_body     = true;
      current_scope->keep              = true;
      current_scope->outer_function    = current_function_scope;
      current_function_scope           = current_scope;

      keep_next_level_flag = false;
      next_is_function_body = false;

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope->outer)
	current_scope->float_const_decimal64
	  = current_scope->outer->float_const_decimal64;
      else
	current_scope->float_const_decimal64 = false;
    }
  else
    {
      struct c_scope *scope;
      /* Reuse a popped scope structure when one is available;
	 otherwise GC-allocate a zeroed one.  */
      if (scope_freelist)
	{
	  scope = scope_freelist;
	  scope_freelist = scope->outer;
	}
      else
	scope = ggc_cleared_alloc<c_scope> ();

      /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes.  */
      if (current_scope)
	scope->float_const_decimal64 = current_scope->float_const_decimal64;
      else
	scope->float_const_decimal64 = false;

      scope->keep          = keep_next_level_flag;
      scope->outer         = current_scope;
      scope->depth	   = current_scope ? (current_scope->depth + 1) : 0;

      /* Check for scope depth overflow.  Unlikely (2^28 == 268,435,456) but
	 possible.  */
      if (current_scope && scope->depth == 0)
	{
	  scope->depth--;
	  sorry ("GCC supports only %u nested scopes", scope->depth);
	}

      current_scope        = scope;
      keep_next_level_flag = false;
    }
}

/* This is called when we are leaving SCOPE.  For each label defined
   in SCOPE, add any appropriate decls to its decls_in_scope fields.
   These are the decls whose initialization will be skipped by a goto
   later in the function.
*/
static void
update_label_decls (struct c_scope *scope)
{
  struct c_scope *s;

  /* Walk SCOPE and every enclosing scope up to (and including) the
     current function scope, since labels anywhere in the function can
     be targets of gotos crossing SCOPE's bindings.  */
  s = scope;
  while (s != NULL)
    {
      if (s->has_label_bindings)
	{
	  struct c_binding *b;

	  for (b = s->bindings; b != NULL; b = b->prev)
	    {
	      struct c_label_vars *label_vars;
	      struct c_binding *b1;
	      bool hjud;
	      unsigned int ix;
	      struct c_goto_bindings *g;

	      if (TREE_CODE (b->decl) != LABEL_DECL)
		continue;
	      label_vars = b->u.label;

	      b1 = label_vars->label_bindings.bindings_in_scope;
	      /* Remember whether the label's scope held any
		 jump-unsafe decl; update_spot_bindings below rewrites
		 label_bindings.scope, so query it first.  */
	      if (label_vars->label_bindings.scope == NULL)
		hjud = false;
	      else
		hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl;
	      if (update_spot_bindings (scope, &label_vars->label_bindings))
		{
		  /* This label is defined in this scope.  */
		  if (hjud)
		    {
		      for (; b1 != NULL; b1 = b1->prev)
			{
			  /* A goto from later in the function to this
			     label will never see the initialization
			     of B1, if any.  Save it to issue a
			     warning if needed.  */
			  if (decl_jump_unsafe (b1->decl))
			    vec_safe_push(label_vars->decls_in_scope,
					  b1->decl);
			}
		    }
		}

	      /* Update the bindings of any goto statements associated
		 with this label.  */
	      FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
		update_spot_bindings (scope, &g->goto_bindings);
	    }
	}

      /* Don't search beyond the current function.  */
      if (s == current_function_scope)
	break;

      s = s->outer;
    }
}

/* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT.  */

static void
set_type_context (tree type, tree context)
{
  for (type = TYPE_MAIN_VARIANT (type); type;
       type = TYPE_NEXT_VARIANT (type))
    TYPE_CONTEXT (type) = context;
}

/* Exit a scope.  Restore the state of the identifier-decl mappings
   that were in effect when this scope was entered.  Return a BLOCK
   node containing all the DECLs in this scope that are of interest
   to debug info generation.
*/

tree
pop_scope (void)
{
  struct c_scope *scope = current_scope;
  tree block, context, p;
  struct c_binding *b;

  bool functionbody = scope->function_body;
  /* Keep a BLOCK node if this scope is a function body, was explicitly
     marked for keeping, or actually declared anything.  */
  bool keep = functionbody || scope->keep || scope->bindings;

  update_label_decls (scope);

  /* If appropriate, create a BLOCK to record the decls for the life
     of this function.  */
  block = NULL_TREE;
  if (keep)
    {
      block = make_node (BLOCK);
      BLOCK_SUBBLOCKS (block) = scope->blocks;
      TREE_USED (block) = 1;

      /* In each subblock, record that this is its superior.  */
      for (p = scope->blocks; p; p = BLOCK_CHAIN (p))
	BLOCK_SUPERCONTEXT (p) = block;

      BLOCK_VARS (block) = NULL_TREE;
    }

  /* The TYPE_CONTEXTs for all of the tagged types belonging to this
     scope must be set so that they point to the appropriate
     construct, i.e. either to the current FUNCTION_DECL node, or
     else to the BLOCK node we just constructed.

     Note that for tagged types whose scope is just the formal
     parameter list for some function type specification, we can't
     properly set their TYPE_CONTEXTs here, because we don't have a
     pointer to the appropriate FUNCTION_TYPE node readily available
     to us.  For those cases, the TYPE_CONTEXTs of the relevant tagged
     type nodes get set in `grokdeclarator' as soon as we have created
     the FUNCTION_TYPE node which will represent the "scope" for these
     "parameter list local" tagged types.  */
  if (scope->function_body)
    context = current_function_decl;
  else if (scope == file_scope)
    {
      tree file_decl
	= build_translation_unit_decl (get_identifier (main_input_filename));
      context = file_decl;
      debug_hooks->register_main_translation_unit (file_decl);
    }
  else
    context = block;

  /* Clear all bindings in this scope.  */
  for (b = scope->bindings; b; b = free_binding_and_advance (b))
    {
      p = b->decl;
      switch (TREE_CODE (p))
	{
	case LABEL_DECL:
	  /* Warnings for unused labels, errors for undefined labels.  */
	  if (TREE_USED (p) && !DECL_INITIAL (p))
	    {
	      error ("label %q+D used but not defined", p);
	      DECL_INITIAL (p) = error_mark_node;
	    }
	  else
	    warn_for_unused_label (p);

	  /* Labels go in BLOCK_VARS.  */
	  DECL_CHAIN (p) = BLOCK_VARS (block);
	  BLOCK_VARS (block) = p;
	  gcc_assert (I_LABEL_BINDING (b->id) == b);
	  I_LABEL_BINDING (b->id) = b->shadowed;

	  /* Also pop back to the shadowed label_vars.  */
	  release_tree_vector (b->u.label->decls_in_scope);
	  b->u.label = b->u.label->shadowed;
	  break;

	case ENUMERAL_TYPE:
	case UNION_TYPE:
	case RECORD_TYPE:
	  set_type_context (p, context);

	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }
	  break;

	case FUNCTION_DECL:
	  /* Propagate TREE_ADDRESSABLE from nested functions to their
	     containing functions.  */
	  if (!TREE_ASM_WRITTEN (p)
	      && DECL_INITIAL (p) != NULL_TREE
	      && TREE_ADDRESSABLE (p)
	      && DECL_ABSTRACT_ORIGIN (p) != NULL_TREE
	      && DECL_ABSTRACT_ORIGIN (p) != p)
	    TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1;
	  if (!TREE_PUBLIC (p)
	      && !DECL_INITIAL (p)
	      && !b->nested
	      && scope != file_scope
	      && scope != external_scope)
	    {
	      error ("nested function %q+D declared but never defined", p);
	      undef_nested_function = true;
	    }
	  else if (DECL_DECLARED_INLINE_P (p)
		   && TREE_PUBLIC (p)
		   && !DECL_INITIAL (p))
	    {
	      /* C99 6.7.4p6: "a function with external linkage... declared
		 with an inline function specifier ... shall also be defined
		 in the same translation unit."  */
	      if (!flag_gnu89_inline
		  && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p))
		  && scope == external_scope)
		pedwarn (input_location, 0,
			 "inline function %q+D declared but never defined", p);
	      DECL_EXTERNAL (p) = 1;
	    }

	  goto common_symbol;

	case VAR_DECL:
	  /* Warnings for unused variables.  */
	  if ((!TREE_USED (p) || !DECL_READ_P (p))
	      && !TREE_NO_WARNING (p)
	      && !DECL_IN_SYSTEM_HEADER (p)
	      && DECL_NAME (p)
	      && !DECL_ARTIFICIAL (p)
	      && scope != file_scope
	      && scope != external_scope)
	    {
	      if (!TREE_USED (p))
		warning (OPT_Wunused_variable, "unused variable %q+D", p);
	      else if (DECL_CONTEXT (p) == current_function_decl)
		warning_at (DECL_SOURCE_LOCATION (p),
			    OPT_Wunused_but_set_variable,
			    "variable %qD set but not used", p);
	    }

	  if (b->inner_comp)
	    {
	      error ("type of array %q+D completed incompatibly with"
		     " implicit initialization", p);
	    }

	  /* Fall through.  */
	case TYPE_DECL:
	case CONST_DECL:
	common_symbol:
	  /* All of these go in BLOCK_VARS, but only if this is the
	     binding in the home scope.  */
	  if (!b->nested)
	    {
	      DECL_CHAIN (p) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = p;
	    }
	  else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope)
	    {
	      /* For block local externs add a special
		 DECL_EXTERNAL decl for debug info generation.  */
	      tree extp = copy_node (p);

	      DECL_EXTERNAL (extp) = 1;
	      TREE_STATIC (extp) = 0;
	      TREE_PUBLIC (extp) = 1;
	      DECL_INITIAL (extp) = NULL_TREE;
	      DECL_LANG_SPECIFIC (extp) = NULL;
	      DECL_CONTEXT (extp) = current_function_decl;
	      if (TREE_CODE (p) == FUNCTION_DECL)
		{
		  DECL_RESULT (extp) = NULL_TREE;
		  DECL_SAVED_TREE (extp) = NULL_TREE;
		  DECL_STRUCT_FUNCTION (extp) = NULL;
		}
	      if (b->locus != UNKNOWN_LOCATION)
		DECL_SOURCE_LOCATION (extp) = b->locus;
	      DECL_CHAIN (extp) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = extp;
	    }
	  /* If this is the file scope set DECL_CONTEXT of each decl to
	     the TRANSLATION_UNIT_DECL.  This makes same_translation_unit_p
	     work.  */
	  if (scope == file_scope)
	    {
	      DECL_CONTEXT (p) = context;
	      if (TREE_CODE (p) == TYPE_DECL
		  && TREE_TYPE (p) != error_mark_node)
		set_type_context (TREE_TYPE (p), context);
	    }

	  gcc_fallthrough ();
	  /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have
	     already been put there by store_parm_decls.  Unused-
	     parameter warnings are handled by function.c.
	     error_mark_node obviously does not go in BLOCK_VARS and
	     does not get unused-variable warnings.  */
	case PARM_DECL:
	case ERROR_MARK:
	  /* It is possible for a decl not to have a name.  We get here
	     with b->id NULL in this case.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	      if (b->shadowed && b->shadowed->u.type)
		TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }


  /* Dispose of the block that we just made inside some higher level.  */
  if ((scope->function_body || scope == file_scope) && context)
    {
      DECL_INITIAL (context) = block;
      BLOCK_SUPERCONTEXT (block) = context;
    }
  else if (scope->outer)
    {
      if (block)
	SCOPE_LIST_APPEND (scope->outer, blocks, block);
      /* If we did not make a block for the scope just exited, any
	 blocks made for inner scopes must be carried forward so they
	 will later become subblocks of something else.  */
      else if (scope->blocks)
	SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks);
    }

  /* Pop the current scope, and free the structure for reuse.  */
  current_scope = scope->outer;
  if (scope->function_body)
    current_function_scope = scope->outer_function;

  memset (scope, 0, sizeof (struct c_scope));
  scope->outer = scope_freelist;
  scope_freelist = scope;

  return block;
}

/* Create the file scope (the outermost scope, holding file-scope
   declarations) and pre-bind all visible built-in declarations into
   it.  No-op if the file scope already exists.  */

void
push_file_scope (void)
{
  tree decl;

  if (file_scope)
    return;

  push_scope ();
  file_scope = current_scope;

  start_fname_decls ();

  for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl))
    bind (DECL_NAME (decl), decl, file_scope,
	  /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl));
}

/* Pop back out to and then discard the file scope, closing out the
   translation unit (or writing a PCH instead, when one was
   requested).  */

void
pop_file_scope (void)
{
  /* In case there were missing closebraces, get us back to the global
     binding level.  */
  while (current_scope != file_scope)
    pop_scope ();

  /* __FUNCTION__ is defined at file scope ("").  This
     call may not be necessary as my tests indicate it
     still works without it.  */
  finish_fname_decls ();

  check_inline_statics ();

  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      c_common_write_pch ();
      /* Ensure even the callers don't try to finalize the CU.  */
      flag_syntax_only = 1;
      return;
    }

  /* Pop off the file scope and close this translation unit.  */
  pop_scope ();
  file_scope = 0;

  maybe_apply_pending_pragma_weaks ();
}

/* Adjust the bindings for the start of a statement expression.  */

void
c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings)
{
  struct c_scope *scope;

  /* Bump the statement-expression depth counter on every label binding
     (and every goto recorded against it) in every enclosing scope.  */
  for (scope = current_scope; scope != NULL; scope = scope->outer)
    {
      struct c_binding *b;

      if (!scope->has_label_bindings)
	continue;

      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  struct c_label_vars *label_vars;
	  unsigned int ix;
	  struct c_goto_bindings *g;

	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;
	  label_vars = b->u.label;
	  ++label_vars->label_bindings.stmt_exprs;
	  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
	    ++g->goto_bindings.stmt_exprs;
	}
    }

  /* The enclosing switch statement's bindings, if any, track the
     statement expression as well.  */
  if (switch_bindings != NULL)
    ++switch_bindings->stmt_exprs;
}

/* Adjust the bindings for the end of a statement expression.  */

void
c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings)
{
  struct c_scope *scope;

  /* Mirror of c_bindings_start_stmt_expr: decrement each depth counter,
     and if it would go negative the binding was created inside the
     statement expression, so record that we have left it.  */
  for (scope = current_scope; scope != NULL; scope = scope->outer)
    {
      struct c_binding *b;

      if (!scope->has_label_bindings)
	continue;

      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  struct c_label_vars *label_vars;
	  unsigned int ix;
	  struct c_goto_bindings *g;

	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;
	  label_vars = b->u.label;
	  --label_vars->label_bindings.stmt_exprs;
	  if (label_vars->label_bindings.stmt_exprs < 0)
	    {
	      label_vars->label_bindings.left_stmt_expr = true;
	      label_vars->label_bindings.stmt_exprs = 0;
	    }
	  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
	    {
	      --g->goto_bindings.stmt_exprs;
	      if (g->goto_bindings.stmt_exprs < 0)
		{
		  g->goto_bindings.left_stmt_expr = true;
		  g->goto_bindings.stmt_exprs = 0;
		}
	    }
	}
    }

  if (switch_bindings != NULL)
    {
      --switch_bindings->stmt_exprs;
      gcc_assert (switch_bindings->stmt_exprs >= 0);
    }
}

/* Push a definition or a declaration of struct, union or enum tag "name".
   "type" should be the type node.
   We assume that the tag "name" is not already defined, and has a location
   of LOC.

   Note that the definition may really be just a forward reference.
   In that case, the TYPE_SIZE will be zero.  */

static void
pushtag (location_t loc, tree name, tree type)
{
  /* Record the identifier as the type's name if it has none.  */
  if (name && !TYPE_NAME (type))
    TYPE_NAME (type) = name;
  bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false,
	loc);

  /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
     tagged type we just added to the current scope.  This fake
     NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
     to output a representation of a tagged type, and it also gives
     us a convenient place to record the "scope start" address for the
     tagged type.  */

  TYPE_STUB_DECL (type) = pushdecl (build_decl (loc,
						TYPE_DECL, NULL_TREE, type));

  /* An approximation for now, so we can tell this is a function-scope tag.
     This will be updated in pop_scope.  */
  TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));

  if (warn_cxx_compat && name != NULL_TREE)
    {
      struct c_binding *b = I_SYMBOL_BINDING (name);

      /* In C++ the same identifier may not name both a typedef and a
	 tag (unless they refer to the same type); warn when that
	 pattern is seen under -Wc++-compat.  */
      if (b != NULL
	  && b->decl != NULL_TREE
	  && TREE_CODE (b->decl) == TYPE_DECL
	  && (B_IN_CURRENT_SCOPE (b)
	      || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
	  && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl))
	      != TYPE_MAIN_VARIANT (type)))
	{
	  auto_diagnostic_group d;
	  if (warning_at (loc, OPT_Wc___compat,
			  ("using %qD as both a typedef and a tag is "
			   "invalid in C++"), b->decl)
	      && b->locus != UNKNOWN_LOCATION)
	    inform (b->locus, "originally defined here");
	}
    }
}

/* An exported interface to pushtag.  This is used by the gdb plugin's
   binding oracle to introduce a new tag binding.  */

void
c_pushtag (location_t loc, tree name, tree type)
{
  pushtag (loc, name, type);
}

/* An exported interface to bind a declaration.  LOC is the location
   to use.  DECL is the declaration to bind.  The decl's name is used
   to determine how it is bound.  If DECL is a VAR_DECL, then
   IS_GLOBAL determines whether the decl is put into the global (file
   and external) scope or the current function's scope; if DECL is not
   a VAR_DECL then it is always put into the file scope.  */

void
c_bind (location_t loc, tree decl, bool is_global)
{
  struct c_scope *scope;
  bool nested = false;

  if (!VAR_P (decl) || current_function_scope == NULL)
    {
      /* Types and functions are always considered to be global.  */
      scope = file_scope;
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }
  else if (is_global)
    {
      /* Also bind it into the external scope.  */
      bind (DECL_NAME (decl), decl, external_scope, true, false, loc);
      nested = true;
      scope = file_scope;
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }
  else
    {
      DECL_CONTEXT (decl) = current_function_decl;
      TREE_PUBLIC (decl) = 0;
      scope = current_function_scope;
    }

  bind (DECL_NAME (decl), decl, scope, false, nested, loc);
}

/* Stores the first FILE*, const struct tm* etc.
   argument type (whatever it is) seen in a declaration of a file I/O etc.
   built-in, corresponding to the builtin_structptr_types array.
   Subsequent declarations of such built-ins are expected to refer to
   it rather than to fileptr_type_node, etc. which is just void* (or
   to any other type).
   Used only by match_builtin_function_types.  */

static const unsigned builtin_structptr_type_count
  = sizeof builtin_structptr_types / sizeof builtin_structptr_types[0];

static GTY(()) tree last_structptr_types[builtin_structptr_type_count];

/* Returns true if types T1 and T2 representing return types or types
   of function arguments are close enough to be considered
   interchangeable in redeclarations of built-in functions.  */

static bool
types_close_enough_to_match (tree t1, tree t2)
{
  return (TYPE_MODE (t1) == TYPE_MODE (t2)
	  && POINTER_TYPE_P (t1) == POINTER_TYPE_P (t2)
	  && FUNCTION_POINTER_TYPE_P (t1) == FUNCTION_POINTER_TYPE_P (t2));
}

/* Subroutine of compare_decls.  Allow harmless mismatches in return
   and argument types provided that the type modes match.  Set *STRICT
   and *ARGNO to the expected argument type and number in case of
   an argument type mismatch or null and zero otherwise.  Return
   a unified type given a suitable match, and 0 otherwise.  */

static tree
match_builtin_function_types (tree newtype, tree oldtype,
			      tree *strict, unsigned *argno)
{
  *argno = 0;
  *strict = NULL_TREE;

  /* Accept the return type of the new declaration if it has the same
     mode and if they're both pointers or if neither is.  */
  tree oldrettype = TREE_TYPE (oldtype);
  tree newrettype = TREE_TYPE (newtype);

  if (!types_close_enough_to_match (oldrettype, newrettype))
    return NULL_TREE;

  /* Check that the return types are compatible but don't fail if they
     are not (e.g., int vs long in ILP32) and just let the caller know.  */
  if (!comptypes (TYPE_MAIN_VARIANT (oldrettype),
		  TYPE_MAIN_VARIANT (newrettype)))
    *strict = oldrettype;

  tree oldargs = TYPE_ARG_TYPES (oldtype);
  tree newargs = TYPE_ARG_TYPES (newtype);
  tree tryargs = newargs;

  const unsigned nlst
    = sizeof last_structptr_types / sizeof last_structptr_types[0];
  const unsigned nbst
    = sizeof builtin_structptr_types / sizeof builtin_structptr_types[0];

  gcc_checking_assert (nlst == nbst);

  /* I counts arguments from 1 so *ARGNO can be used directly in
     diagnostics.  */
  for (unsigned i = 1; oldargs || newargs; ++i)
    {
      if (!oldargs
	  || !newargs
	  || !TREE_VALUE (oldargs)
	  || !TREE_VALUE (newargs))
	return NULL_TREE;

      tree oldtype = TYPE_MAIN_VARIANT (TREE_VALUE (oldargs));
      tree newtype = TYPE_MAIN_VARIANT (TREE_VALUE (newargs));

      if (!types_close_enough_to_match (oldtype, newtype))
	return NULL_TREE;

      unsigned j = nbst;
      if (POINTER_TYPE_P (oldtype))
	/* Iterate over well-known struct types like FILE (whose types
	   aren't known to us) and compare the pointer to each to
	   the pointer argument.  */
	for (j = 0; j < nbst; ++j)
	  {
	    if (TREE_VALUE (oldargs) != builtin_structptr_types[j].node)
	      continue;
	    /* Store the first FILE* etc. argument type (whatever it is),
	       and expect any subsequent declarations of file I/O etc.
	       built-ins to refer to it rather than to fileptr_type_node
	       etc. which is just void* (or const void*).  */
	    if (last_structptr_types[j])
	      {
		if (!comptypes (last_structptr_types[j], newtype))
		  {
		    *argno = i;
		    *strict = last_structptr_types[j];
		  }
	      }
	    else
	      last_structptr_types[j] = newtype;
	    break;
	  }

      if (j == nbst && !comptypes (oldtype, newtype))
	{
	  if (POINTER_TYPE_P (oldtype))
	    {
	      /* For incompatible pointers, only reject differences in
		 the unqualified variants of the referenced types but
		 consider differences in qualifiers as benign (report
		 those to caller via *STRICT below).  */
	      tree oldref = TYPE_MAIN_VARIANT (TREE_TYPE (oldtype));
	      tree newref = TYPE_MAIN_VARIANT (TREE_TYPE (newtype));
	      if (!comptypes (oldref, newref))
		return NULL_TREE;
	    }

	  if (!*strict)
	    {
	      *argno = i;
	      *strict = oldtype;
	    }
	}

      oldargs = TREE_CHAIN (oldargs);
      newargs = TREE_CHAIN (newargs);
    }

  tree trytype = build_function_type (newrettype, tryargs);

  /* Allow declaration to change transaction_safe attribute.  */
  tree oldattrs = TYPE_ATTRIBUTES (oldtype);
  tree oldtsafe = lookup_attribute ("transaction_safe", oldattrs);
  tree newattrs = TYPE_ATTRIBUTES (newtype);
  tree newtsafe = lookup_attribute ("transaction_safe", newattrs);
  if (oldtsafe && !newtsafe)
    oldattrs = remove_attribute ("transaction_safe", oldattrs);
  else if (newtsafe && !oldtsafe)
    oldattrs = tree_cons (get_identifier ("transaction_safe"),
			  NULL_TREE, oldattrs);

  return build_type_attribute_variant (trytype, oldattrs);
}

/* Subroutine of diagnose_mismatched_decls.  Check for function type
   mismatch involving an empty arglist vs a nonempty one and give clearer
   diagnostics.  */

static void
diagnose_arglist_conflict (tree newdecl, tree olddecl,
			   tree newtype, tree oldtype)
{
  tree t;

  /* Only relevant when exactly one of the two declarations is
     unprototyped and not yet defined.  */
  if (TREE_CODE (olddecl) != FUNCTION_DECL
      || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype))
      || !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == NULL_TREE)
	   || (!prototype_p (newtype) && DECL_INITIAL (newdecl) == NULL_TREE)))
    return;

  t = TYPE_ARG_TYPES (oldtype);
  if (t == NULL_TREE)
    t = TYPE_ARG_TYPES (newtype);
  for (; t; t = TREE_CHAIN (t))
    {
      tree type = TREE_VALUE (t);

      if (TREE_CHAIN (t) == NULL_TREE
	  && TYPE_MAIN_VARIANT (type) != void_type_node)
	{
	  inform (input_location, "a parameter list with an ellipsis "
		  "cannot match an empty parameter name list declaration");
	  break;
	}

      if (c_type_promotes_to (type) != type)
	{
	  inform (input_location, "an argument type that has a default "
		  "promotion cannot match an empty parameter name list "
		  "declaration");
	  break;
	}
    }
}

/* Another subroutine of diagnose_mismatched_decls.
   OLDDECL is an old-style function definition, NEWDECL is a prototype
   declaration.  Diagnose inconsistencies in the argument list.  Returns
   TRUE if the prototype is compatible, FALSE if not.  */
static bool
validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
{
  tree newargs, oldargs;
  int i;

#define END_OF_ARGLIST(t) ((t) == void_type_node)

  oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype);
  newargs = TYPE_ARG_TYPES (newtype);
  i = 1;

  for (;;)
    {
      tree oldargtype = TREE_VALUE (oldargs);
      tree newargtype = TREE_VALUE (newargs);

      if (oldargtype == error_mark_node || newargtype == error_mark_node)
	return false;

      /* Compare main variants, but keep _Atomic qualification, which
	 is significant for compatibility.  */
      oldargtype = (TYPE_ATOMIC (oldargtype)
		    ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype),
					      TYPE_QUAL_ATOMIC)
		    : TYPE_MAIN_VARIANT (oldargtype));
      newargtype = (TYPE_ATOMIC (newargtype)
		    ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype),
					      TYPE_QUAL_ATOMIC)
		    : TYPE_MAIN_VARIANT (newargtype));

      if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype))
	break;

      /* Reaching the end of just one list means the two decls don't
	 agree on the number of arguments.  */
      if (END_OF_ARGLIST (oldargtype))
	{
	  error ("prototype for %q+D declares more arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}
      else if (END_OF_ARGLIST (newargtype))
	{
	  error ("prototype for %q+D declares fewer arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}

      /* Type for passing arg must be consistent with that declared
	 for the arg.  */
      else if (!comptypes (oldargtype, newargtype))
	{
	  error ("prototype for %q+D declares argument %d"
		 " with incompatible type", newdecl, i);
	  return false;
	}

      oldargs = TREE_CHAIN (oldargs);
      newargs = TREE_CHAIN (newargs);
      i++;
    }

  /* If we get here, no errors were found, but do issue a warning
     for this poor-style construct.  */
  warning (0, "prototype for %q+D follows non-prototype definition",
	   newdecl);
  return true;

#undef END_OF_ARGLIST
}

/* Subroutine of diagnose_mismatched_decls.
   Report the location of DECL, the first of a pair of mismatched
   declarations.  Says nothing for an undeclared built-in, whose
   "previous declaration" has no useful location.  */

static void
locate_old_decl (tree decl)
{
  if (TREE_CODE (decl) == FUNCTION_DECL && fndecl_built_in_p (decl)
      && !C_DECL_DECLARED_BUILTIN (decl))
    ;
  else if (DECL_INITIAL (decl))
    inform (input_location,
	    "previous definition of %q+D was here", decl);
  else if (C_DECL_IMPLICIT (decl))
    inform (input_location,
	    "previous implicit declaration of %q+D was here", decl);
  else
    inform (input_location,
	    "previous declaration of %q+D was here", decl);
}

/* Subroutine of duplicate_decls.  Compare NEWDECL to OLDDECL.
   Returns true if the caller should proceed to merge the two, false
   if OLDDECL should simply be discarded.  As a side effect, issues
   all necessary diagnostics for invalid or poor-style combinations.
   If it returns true, writes the types of NEWDECL and OLDDECL to
   *NEWTYPEP and *OLDTYPEP - these may have been adjusted from
   TREE_TYPE (NEWDECL, OLDDECL) respectively.  */

static bool
diagnose_mismatched_decls (tree newdecl, tree olddecl,
			   tree *newtypep, tree *oldtypep)
{
  tree newtype, oldtype;
  bool retval = true;

#define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL)  \
				  && DECL_EXTERNAL (DECL))

  /* If we have error_mark_node for either decl or type, just discard
     the previous decl - we're in an error cascade already.  */
  if (olddecl == error_mark_node || newdecl == error_mark_node)
    return false;
  *oldtypep = oldtype = TREE_TYPE (olddecl);
  *newtypep = newtype = TREE_TYPE (newdecl);
  if (oldtype == error_mark_node || newtype == error_mark_node)
    return false;

  /* Two different categories of symbol altogether.  This is an error
     unless OLDDECL is a builtin.  OLDDECL will be discarded in any
     case.  */
  if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
    {
      if (!(TREE_CODE (olddecl) == FUNCTION_DECL
	    && fndecl_built_in_p (olddecl)
	    && !C_DECL_DECLARED_BUILTIN (olddecl)))
	{
	  auto_diagnostic_group d;
	  error ("%q+D redeclared as different kind of symbol", newdecl);
	  locate_old_decl (olddecl);
	}
      else if (TREE_PUBLIC (newdecl))
	warning (OPT_Wbuiltin_declaration_mismatch,
		 "built-in function %q+D declared as non-function",
		 newdecl);
      else
	warning (OPT_Wshadow, "declaration of %q+D shadows "
		 "a built-in function", newdecl);
      return false;
    }

  /* Enumerators have no linkage, so may only be declared once in a
     given scope.  */
  if (TREE_CODE (olddecl) == CONST_DECL)
    {
      auto_diagnostic_group d;
      error ("redeclaration of enumerator %q+D", newdecl);
      locate_old_decl (olddecl);
      return false;
    }

  bool pedwarned = false;
  bool warned = false;
  auto_diagnostic_group d;

  if (!comptypes (oldtype, newtype))
    {
      if (TREE_CODE (olddecl) == FUNCTION_DECL
	  && fndecl_built_in_p (olddecl, BUILT_IN_NORMAL)
	  && !C_DECL_DECLARED_BUILTIN (olddecl))
	{
	  /* Accept "harmless" mismatches in function types such
	     as missing qualifiers or int vs long when they're the same
	     size.  However, diagnose return and argument types that are
	     incompatible according to language rules.  */
	  tree mismatch_expect;
	  unsigned mismatch_argno;

	  tree trytype = match_builtin_function_types (newtype, oldtype,
						       &mismatch_expect,
						       &mismatch_argno);

	  if (trytype && comptypes (newtype, trytype))
	    *oldtypep = oldtype = trytype;
	  else
	    {
	      /* If types don't match for a built-in, throw away the
		 built-in.  No point in calling locate_old_decl here, it
		 won't print anything.  */
	      const char *header = header_for_builtin_fn (olddecl);
	      location_t loc = DECL_SOURCE_LOCATION (newdecl);
	      if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch,
			      "conflicting types for built-in function %q+D; "
			      "expected %qT",
			      newdecl, oldtype)
		  && header)
		{
		  /* Suggest the right header to include as the preferred
		     solution rather than the spelling of the
		     declaration.  */
		  rich_location richloc (line_table, loc);
		  maybe_add_include_fixit (&richloc, header, true);
		  inform (&richloc,
			  "%qD is declared in header %qs", olddecl, header);
		}
	      return false;
	    }

	  if (mismatch_expect && extra_warnings)
	    {
	      location_t newloc = DECL_SOURCE_LOCATION (newdecl);
	      bool warned = false;
	      if (mismatch_argno)
		warned = warning_at (newloc, OPT_Wbuiltin_declaration_mismatch,
				     "mismatch in argument %u type of built-in "
				     "function %qD; expected %qT",
				     mismatch_argno, newdecl, mismatch_expect);
	      else
		warned = warning_at (newloc, OPT_Wbuiltin_declaration_mismatch,
				     "mismatch in return type of built-in "
				     "function %qD; expected %qT",
				     newdecl, mismatch_expect);
	      const char *header = header_for_builtin_fn (olddecl);
	      if (warned && header)
		{
		  rich_location richloc (line_table, newloc);
		  maybe_add_include_fixit (&richloc, header, true);
		  inform (&richloc,
			  "%qD is declared in header %qs", olddecl, header);
		}
	    }
	}
      else if (TREE_CODE (olddecl) == FUNCTION_DECL
	       && DECL_IS_BUILTIN (olddecl))
	{
	  /* A conflicting function declaration for a predeclared
	     function that isn't actually built in.  Objective C uses
	     these.  The new declaration silently overrides everything
	     but the volatility (i.e. noreturn) indication.  See also
	     below.  FIXME: Make Objective C use normal builtins.  */
	  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
	  return false;
	}
      /* Permit void foo (...) to match int foo (...) if the latter is
	 the definition and implicit int was used.  See
	 c-torture/compile/920625-2.c.  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl)
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node
	       && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (newdecl) = *newtypep = newtype = oldtype;
	  C_FUNCTION_IMPLICIT_INT (newdecl) = 0;
	}
      /* Permit void foo (...) to match an earlier call to foo (...) with
	 no declared type (thus, implicitly int).  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node
	       && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype;
	}
      else
	{
	  int new_quals = TYPE_QUALS (newtype);
	  int old_quals = TYPE_QUALS (oldtype);

	  if (new_quals != old_quals)
	    {
	      addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals);
	      addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals);
	      if (new_addr != old_addr)
		{
		  if (ADDR_SPACE_GENERIC_P (new_addr))
		    error ("conflicting named address spaces (generic vs %s) "
			   "for %q+D",
			   c_addr_space_name (old_addr), newdecl);
		  else if (ADDR_SPACE_GENERIC_P (old_addr))
		    error ("conflicting named address spaces (%s vs generic) "
			   "for %q+D",
			   c_addr_space_name (new_addr), newdecl);
		  else
		    error ("conflicting named address spaces (%s vs %s) "
			   "for %q+D",
			   c_addr_space_name (new_addr),
			   c_addr_space_name (old_addr),
			   newdecl);
		}

	      if (CLEAR_QUAL_ADDR_SPACE (new_quals)
		  != CLEAR_QUAL_ADDR_SPACE (old_quals))
		error ("conflicting type qualifiers for %q+D", newdecl);
	    }
	  else
	    error ("conflicting types for %q+D", newdecl);
	  diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype);
	  locate_old_decl (olddecl);
	  return false;
	}
    }

  /* Redeclaration of a type is a constraint violation (6.7.2.3p1),
     but silently ignore the redeclaration if either is in a system
     header.  (Conflicting redeclarations were handled above.)  This
     is allowed for C11 if the types are the same, not just
     compatible.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      bool types_different = false;
      int comptypes_result;

      comptypes_result
	= comptypes_check_different_types (oldtype, newtype, &types_different);

      if (comptypes_result != 1 || types_different)
	{
	  error ("redefinition of typedef %q+D with different type", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}

      if (DECL_IN_SYSTEM_HEADER (newdecl)
	  || DECL_IN_SYSTEM_HEADER (olddecl)
	  || TREE_NO_WARNING (newdecl)
	  || TREE_NO_WARNING (olddecl))
	return true;  /* Allow OLDDECL to continue in use.  */

      if (variably_modified_type_p (newtype, NULL))
	{
	  error ("redefinition of typedef %q+D with variably modified type",
		 newdecl);
	  locate_old_decl (olddecl);
	}
      else if (pedwarn_c99 (input_location, OPT_Wpedantic,
			    "redefinition of typedef %q+D", newdecl))
	locate_old_decl (olddecl);

      return true;
    }

  /* Function declarations can either be 'static' or 'extern' (no
     qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore
     can never conflict with each other on account of linkage
     (6.2.2p4).  Multiple definitions are not allowed (6.9p3,5) but
     gnu89 mode permits two definitions if one is 'extern inline' and
     one is not.  The non- extern-inline definition supersedes the
     extern-inline definition.  */

  else if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If you declare a built-in function name as static, or
	 define the built-in with an old-style definition (so we
	 can't validate the argument list) the built-in definition is
	 overridden, but optionally warn this was a bad choice of name.  */
      if (fndecl_built_in_p (olddecl)
	  && !C_DECL_DECLARED_BUILTIN (olddecl))
	{
	  if (!TREE_PUBLIC (newdecl)
	      || (DECL_INITIAL (newdecl)
		  && !prototype_p (TREE_TYPE (newdecl))))
	    {
	      warning_at (DECL_SOURCE_LOCATION (newdecl),
			  OPT_Wshadow, "declaration of %qD shadows "
			  "a built-in function", newdecl);
	      /* Discard the old built-in function.  */
	      return false;
	    }

	  if (!prototype_p (TREE_TYPE (newdecl)))
	    {
	      /* Set for built-ins that take no arguments.  */
	      bool func_void_args = false;
	      if (tree at = TYPE_ARG_TYPES (oldtype))
		func_void_args = VOID_TYPE_P (TREE_VALUE (at));

	      if (extra_warnings && !func_void_args)
		warning_at (DECL_SOURCE_LOCATION (newdecl),
			    OPT_Wbuiltin_declaration_mismatch,
			    "declaration of built-in function %qD without "
			    "a prototype; expected %qT",
			    newdecl, TREE_TYPE (olddecl));
	    }
	}

      if (DECL_INITIAL (newdecl))
	{
	  if (DECL_INITIAL (olddecl))
	    {
	      /* If both decls are in the same TU and the new declaration
		 isn't overriding an extern inline reject the new decl.
		 In c99, no overriding is allowed in the same translation
		 unit.  */
	      if ((!DECL_EXTERN_INLINE (olddecl)
		   || DECL_EXTERN_INLINE (newdecl)
		   || (!flag_gnu89_inline
		       && (!DECL_DECLARED_INLINE_P (olddecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (olddecl)))
		       && (!DECL_DECLARED_INLINE_P (newdecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (newdecl))))
		   )
		  && same_translation_unit_p (newdecl, olddecl))
		{
		  auto_diagnostic_group d;
		  error ("redefinition of %q+D", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	    }
	}
      /* If we have a prototype after an old-style function definition,
	 the argument types must be checked specially.  */
      else if (DECL_INITIAL (olddecl)
	       && !prototype_p (oldtype) && prototype_p (newtype)
	       && TYPE_ACTUAL_ARG_TYPES (oldtype))
	{
	  auto_diagnostic_group d;
	  if (!validate_proto_after_old_defn (newdecl, newtype, oldtype))
	    {
	      locate_old_decl (olddecl);
	      return false;
	    }
	}
      /* A non-static declaration (even an "extern") followed by a
	 static declaration is undefined behavior per C99 6.2.2p3-5,7.
	 The same is true for a static forward declaration at block
	 scope followed by a non-static declaration/definition at file
	 scope.  Static followed by non-static at the same scope is
	 not undefined behavior, and is the most convenient way to get
	 some effects (see e.g.  what unwind-dw2-fde-glibc.c does to
	 the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but
	 we do diagnose it if -Wtraditional.  */
      if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl))
	{
	  /* Two exceptions to the rule.  If olddecl is an extern
	     inline, or a predeclared function that isn't actually
	     built in, newdecl silently overrides olddecl.  The latter
	     occur only in Objective C; see also above.  (FIXME: Make
	     Objective C use normal builtins.)  */
	  if (!DECL_IS_BUILTIN (olddecl)
	      && !DECL_EXTERN_INLINE (olddecl))
	    {
	      auto_diagnostic_group d;
	      error ("static declaration of %q+D follows "
		     "non-static declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  return false;
	}
      else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl))
	{
	  if (DECL_CONTEXT (olddecl))
	    {
	      auto_diagnostic_group d;
	      error ("non-static declaration of %q+D follows "
		     "static declaration", newdecl);
	      locate_old_decl (olddecl);
	      return false;
	    }
	  else if (warn_traditional)
	    {
	      warned |= warning (OPT_Wtraditional,
				 "non-static declaration of %q+D "
				 "follows static declaration", newdecl);
	    }
	}

      /* Make sure gnu_inline attribute is either not present, or
	 present on all inline decls.  */
      if (DECL_DECLARED_INLINE_P (olddecl)
	  && DECL_DECLARED_INLINE_P (newdecl))
	{
	  bool newa = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (newdecl)) != NULL;
	  bool olda = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (olddecl)) != NULL;
	  if (newa != olda)
	    {
	      auto_diagnostic_group d;
	      error_at (input_location,
			"%<gnu_inline%> attribute present on %q+D",
			newa ? newdecl : olddecl);
	      error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl),
			"but not here");
	    }
	}
    }
  else if (VAR_P (newdecl))
    {
      /* Only variables can be thread-local, and all declarations must
	 agree on this property.  */
      if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl))
	{
	  /* Nothing to check.  Since OLDDECL is marked threadprivate
	     and NEWDECL does not have a thread-local attribute, we
	     will merge the threadprivate attribute into NEWDECL.  */
	  ;
	}
      else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl))
	{
	  auto_diagnostic_group d;
	  if (DECL_THREAD_LOCAL_P (newdecl))
	    error ("thread-local declaration of %q+D follows "
		   "non-thread-local declaration", newdecl);
	  else
	    error ("non-thread-local declaration of %q+D follows "
		   "thread-local declaration", newdecl);

	  locate_old_decl (olddecl);
	  return false;
	}

      /* Multiple initialized definitions are not allowed (6.9p3,5).  */
      if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl))
	{
	  auto_diagnostic_group d;
	  error ("redefinition of %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}

      /* Objects declared at file scope: if the first declaration had
	 external linkage (even if it was an external reference) the
	 second must have external linkage as well, or the behavior is
	 undefined.  If the first declaration had internal linkage, then
	 the second must too, or else be an external reference (in which
	 case the composite declaration still has internal linkage).
	 As for function declarations, we warn about the static-then-
	 extern case only for -Wtraditional.  See generally 6.2.2p3-5,7.  */
      if (DECL_FILE_SCOPE_P (newdecl)
	  && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      if (!DECL_FILE_SCOPE_P (olddecl))
		{
		  auto_diagnostic_group d;
		  error ("extern declaration of %q+D follows "
			 "declaration with no linkage", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	      else if (warn_traditional)
		{
		  warned |= warning (OPT_Wtraditional,
				     "non-static declaration of %q+D "
				     "follows static declaration", newdecl);
		}
	    }
	  else
	    {
	      auto_diagnostic_group d;
	      if (TREE_PUBLIC (newdecl))
		error ("non-static declaration of %q+D follows "
		       "static declaration", newdecl);
	      else
		error ("static declaration of %q+D follows "
		       "non-static declaration", newdecl);
	      locate_old_decl (olddecl);
	      return false;
	    }
	}
      /* Two objects with the same name declared at the same block
	 scope must both be external references (6.7p3).  */
      else if (!DECL_FILE_SCOPE_P (newdecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      /* Extern with initializer at block scope, which will
		 already have received an error.  */
	    }
	  else if (DECL_EXTERNAL (olddecl))
	    {
	      auto_diagnostic_group d;
	      error ("declaration of %q+D with no linkage follows "
		     "extern declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  else
	    {
	      auto_diagnostic_group d;
	      error ("redeclaration of %q+D with no linkage", newdecl);
	      locate_old_decl (olddecl);
	    }

	  return false;
	}

      /* C++ does not permit a decl to appear multiple times at file
	 scope.  */
      if (warn_cxx_compat
	  && DECL_FILE_SCOPE_P (newdecl)
	  && !DECL_EXTERNAL (newdecl)
	  && !DECL_EXTERNAL (olddecl))
	warned |= warning_at (DECL_SOURCE_LOCATION (newdecl),
			      OPT_Wc___compat,
			      ("duplicate declaration of %qD is "
			       "invalid in C++"),
			      newdecl);
    }

  /* warnings */
  /* All decls must agree on a visibility.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
      && DECL_VISIBILITY_SPECIFIED (newdecl)
      && DECL_VISIBILITY_SPECIFIED (olddecl)
      && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
    {
      warned |= warning (0, "redeclaration of %q+D with different visibility "
			 "(old visibility preserved)", newdecl);
    }

  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    warned |= diagnose_mismatched_attributes (olddecl, newdecl);
  else /* PARM_DECL, VAR_DECL */
    {
      /* Redeclaration of a parameter is a constraint violation (this is
	 not explicitly stated, but follows from C99 6.7p3 [no more than
	 one declaration of the same identifier with no linkage in the
	 same scope, except type tags] and 6.2.2p6 [parameters have no
	 linkage]).  We must check for a forward parameter declaration,
	 indicated by TREE_ASM_WRITTEN on the old declaration - this is
	 an extension, the mandatory diagnostic for which is handled by
	 mark_forward_parm_decls.  */
      if (TREE_CODE (newdecl) == PARM_DECL
	  && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl)))
	{
	  auto_diagnostic_group d;
	  error ("redefinition of parameter %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}
    }

  /* Optional warning for completely redundant decls.  */
  if (!warned && !pedwarned
      && warn_redundant_decls
      /* Don't warn about a function declaration followed by a
	 definition.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
      /* Don't warn about redundant redeclarations of builtins.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && !fndecl_built_in_p (newdecl)
	   && fndecl_built_in_p (olddecl)
	   && !C_DECL_DECLARED_BUILTIN (olddecl))
      /* Don't warn about an extern followed by a definition.  */
      && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
      /* Don't warn about forward parameter decls.  */
      && !(TREE_CODE (newdecl) == PARM_DECL
	   && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
      /* Don't warn about a variable definition following a
	 declaration.  */
      && !(VAR_P (newdecl)
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)))
    {
      warned = warning (OPT_Wredundant_decls,
			"redundant redeclaration of %q+D",
			newdecl);
    }

  /* Report location of previous decl/defn.  */
  if (warned || pedwarned)
    locate_old_decl (olddecl);

#undef DECL_EXTERN_INLINE

  return retval;
}

/* Subroutine of duplicate_decls.  NEWDECL has been found to be
   consistent with OLDDECL, but carries new information.  Merge the
   new information into OLDDECL.  This function issues no
   diagnostics.
*/

static void
merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
{
  /* Cache the three decisions that drive most of the merging below:
     is NEWDECL a function definition, and which of the two function
     declarations carries a prototype.  */
  bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
			    && DECL_INITIAL (newdecl) != NULL_TREE);
  bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (newdecl)));
  bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (olddecl)));

  /* For real parm decl following a forward decl, rechain the old decl
     in its new location and clear TREE_ASM_WRITTEN (it's not a
     forward decl anymore).  */
  if (TREE_CODE (newdecl) == PARM_DECL
      && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
    {
      struct c_binding *b, **here;

      /* Find the binding for OLDDECL, unlink it, and relink it at the
	 head of the current scope's binding list.  */
      for (here = &current_scope->bindings; *here; here = &(*here)->prev)
	if ((*here)->decl == olddecl)
	  goto found;
      gcc_unreachable ();

    found:
      b = *here;
      *here = b->prev;
      b->prev = current_scope->bindings;
      current_scope->bindings = b;

      TREE_ASM_WRITTEN (olddecl) = 0;
    }

  /* Let the target combine target-specific attributes of the two decls.  */
  DECL_ATTRIBUTES (newdecl)
    = targetm.merge_decl_attributes (olddecl, newdecl);

  /* For typedefs use the old type, as the new type's DECL_NAME points
     at newdecl, which will be ggc_freed.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      /* But NEWTYPE might have an attribute, honor that.  */
      tree tem = newtype;
      newtype = oldtype;

      if (TYPE_USER_ALIGN (tem))
	{
	  if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype))
	    SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem));
	  TYPE_USER_ALIGN (newtype) = true;
	}

      /* And remove the new type from the variants list.  */
      if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl)
	{
	  tree remove = TREE_TYPE (newdecl);
	  if (TYPE_MAIN_VARIANT (remove) == remove)
	    {
	      gcc_assert (TYPE_NEXT_VARIANT (remove) == NULL_TREE);
	      /* If remove is the main variant, no need to remove that
		 from the list.  One of the DECL_ORIGINAL_TYPE
		 variants, e.g. created for aligned attribute, might still
		 refer to the newdecl TYPE_DECL though, so remove that one
		 in that case.  */
	      if (DECL_ORIGINAL_TYPE (newdecl)
		  && DECL_ORIGINAL_TYPE (newdecl) != remove)
		for (tree t = TYPE_MAIN_VARIANT (DECL_ORIGINAL_TYPE (newdecl));
		     t; t = TYPE_MAIN_VARIANT (t))
		  if (TYPE_NAME (TYPE_NEXT_VARIANT (t)) == newdecl)
		    {
		      TYPE_NEXT_VARIANT (t)
			= TYPE_NEXT_VARIANT (TYPE_NEXT_VARIANT (t));
		      break;
		    }
	    }
	  else
	    /* Otherwise walk the variant list and splice REMOVE out.  */
	    for (tree t = TYPE_MAIN_VARIANT (remove); ;
		 t = TYPE_NEXT_VARIANT (t))
	      if (TYPE_NEXT_VARIANT (t) == remove)
		{
		  TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove);
		  break;
		}
	}
    }

  /* Merge the data types specified in the two decls.  */
  TREE_TYPE (newdecl)
    = TREE_TYPE (olddecl)
    = composite_type (newtype, oldtype);

  /* Lay the type out, unless already done.  */
  if (!comptypes (oldtype, TREE_TYPE (newdecl)))
    {
      if (TREE_TYPE (newdecl) != error_mark_node)
	layout_type (TREE_TYPE (newdecl));
      if (TREE_CODE (newdecl) != FUNCTION_DECL
	  && TREE_CODE (newdecl) != TYPE_DECL
	  && TREE_CODE (newdecl) != CONST_DECL)
	layout_decl (newdecl, 0);
    }
  else
    {
      /* Since the type is OLDDECL's, make OLDDECL's size go with.  */
      DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
      DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl);
      SET_DECL_MODE (newdecl, DECL_MODE (olddecl));
      if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
	{
	  SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl));
	  DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl);
	}
      if (DECL_WARN_IF_NOT_ALIGN (olddecl)
	  > DECL_WARN_IF_NOT_ALIGN (newdecl))
	SET_DECL_WARN_IF_NOT_ALIGN (newdecl,
				    DECL_WARN_IF_NOT_ALIGN (olddecl));
    }

  /* Keep the old rtl since we can safely use it.  */
  if (HAS_RTL_P (olddecl))
    COPY_DECL_RTL (olddecl, newdecl);

  /* Merge the type qualifiers.  */
  if (TREE_READONLY (newdecl))
    TREE_READONLY (olddecl) = 1;

  if (TREE_THIS_VOLATILE (newdecl))
    TREE_THIS_VOLATILE (olddecl) = 1;

  /* Merge deprecatedness.  */
  if (TREE_DEPRECATED (newdecl))
    TREE_DEPRECATED (olddecl) = 1;

  /* If a decl is in a system header and the other isn't, keep the one on the
     system header.  Otherwise, keep source location of definition rather than
     declaration and of prototype rather than non-prototype unless that
     prototype is built-in.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
      && DECL_IN_SYSTEM_HEADER (olddecl)
      && !DECL_IN_SYSTEM_HEADER (newdecl) )
    DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
  else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
	   && DECL_IN_SYSTEM_HEADER (newdecl)
	   && !DECL_IN_SYSTEM_HEADER (olddecl))
    DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl);
  else if ((DECL_INITIAL (newdecl) == NULL_TREE
	    && DECL_INITIAL (olddecl) != NULL_TREE)
	   || (old_is_prototype && !new_is_prototype
	       && !C_DECL_BUILTIN_PROTOTYPE (olddecl)))
    DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);

  /* Merge the initialization information.  */
  if (DECL_INITIAL (newdecl) == NULL_TREE)
    DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);

  /* Merge the threadprivate attribute.  */
  if (VAR_P (olddecl) && C_DECL_THREADPRIVATE_P (olddecl))
    C_DECL_THREADPRIVATE_P (newdecl) = 1;

  if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS))
    {
      /* Copy the assembler name.
	 Currently, it can only be defined in the prototype.  */
      COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);

      /* Use visibility of whichever declaration had it specified */
      if (DECL_VISIBILITY_SPECIFIED (olddecl))
	{
	  DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl);
	  DECL_VISIBILITY_SPECIFIED (newdecl) = 1;
	}

      if (TREE_CODE (newdecl) == FUNCTION_DECL)
	{
	  /* OR together the boolean function properties; a property
	     asserted by either declaration sticks to the merged decl.  */
	  DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
	  DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl);
	  DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl);
	  DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl)
	    |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl);
	  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
	  DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl);
	  if (DECL_IS_OPERATOR_NEW_P (olddecl))
	    DECL_SET_IS_OPERATOR_NEW (newdecl, true);
	  if (DECL_IS_OPERATOR_DELETE_P (olddecl))
	    DECL_SET_IS_OPERATOR_DELETE (newdecl, true);
	  TREE_READONLY (newdecl) |= TREE_READONLY (olddecl);
	  DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl);
	  DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl);
	}

      /* Merge the storage class information.  */
      merge_weak (newdecl, olddecl);

      /* For functions, static overrides non-static.  */
      if (TREE_CODE (newdecl) == FUNCTION_DECL)
	{
	  TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
	  /* This is since we don't automatically
	     copy the attributes of NEWDECL into OLDDECL.  */
	  TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
	  /* If this clears `static', clear it in the identifier too.  */
	  if (!TREE_PUBLIC (olddecl))
	    TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
	}
    }

  /* In c99, 'extern' declaration before (or after) 'inline' means this
     function is not DECL_EXTERNAL, unless 'gnu_inline' attribute
     is present.  */
  if (TREE_CODE (newdecl) == FUNCTION_DECL
      && !flag_gnu89_inline
      && (DECL_DECLARED_INLINE_P (newdecl)
	  || DECL_DECLARED_INLINE_P (olddecl))
      && (!DECL_DECLARED_INLINE_P (newdecl)
	  || !DECL_DECLARED_INLINE_P (olddecl)
	  || !DECL_EXTERNAL (olddecl))
      && DECL_EXTERNAL (newdecl)
      && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl))
      && !current_function_decl)
    DECL_EXTERNAL (newdecl) = 0;

  /* An inline definition following a static declaration is not
     DECL_EXTERNAL.  */
  if (new_is_definition
      && (DECL_DECLARED_INLINE_P (newdecl)
	  || DECL_DECLARED_INLINE_P (olddecl))
      && !TREE_PUBLIC (olddecl))
    DECL_EXTERNAL (newdecl) = 0;

  if (DECL_EXTERNAL (newdecl))
    {
      TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
      DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);

      /* An extern decl does not override previous storage class.  */
      TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
      if (!DECL_EXTERNAL (newdecl))
	{
	  DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
	  DECL_COMMON (newdecl) = DECL_COMMON (olddecl);
	}
    }
  else
    {
      TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
      TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
    }

  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If we're redefining a function previously defined as extern
	 inline, make sure we emit debug info for the inline before we
	 throw it away, in case it was inlined into a function that
	 hasn't been written out yet.  */
      if (new_is_definition && DECL_INITIAL (olddecl))
	/* The new defn must not be inline.  */
	DECL_UNINLINABLE (newdecl) = 1;
      else
	{
	  /* If either decl says `inline', this fn is inline, unless
	     its definition was passed already.  */
	  if (DECL_DECLARED_INLINE_P (newdecl)
	      || DECL_DECLARED_INLINE_P (olddecl))
	    DECL_DECLARED_INLINE_P (newdecl) = 1;

	  DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl)
	    = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl));

	  DECL_DISREGARD_INLINE_LIMITS (newdecl)
	    = DECL_DISREGARD_INLINE_LIMITS (olddecl)
	    = (DECL_DISREGARD_INLINE_LIMITS (newdecl)
	       || DECL_DISREGARD_INLINE_LIMITS (olddecl));
	}

      if (fndecl_built_in_p (olddecl))
	{
	  /* If redeclaring a builtin function, it stays built in.
	     But it gets tagged as having been declared.  */
	  copy_decl_built_in_function (newdecl, olddecl);
	  C_DECL_DECLARED_BUILTIN (newdecl) = 1;
	  if (new_is_prototype)
	    {
	      C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0;
	      if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL)
		{
		  enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl);
		  switch (fncode)
		    {
		      /* If a compatible prototype of these builtin
			 functions is seen, assume the runtime implements
			 it with the expected semantics.  */
		    case BUILT_IN_STPCPY:
		      if (builtin_decl_explicit_p (fncode))
			set_builtin_decl_implicit_p (fncode, true);
		      break;
		    default:
		      if (builtin_decl_explicit_p (fncode))
			set_builtin_decl_declared_p (fncode, true);
		      break;
		    }

		  copy_attributes_to_builtin (newdecl);
		}
	    }
	  else
	    C_DECL_BUILTIN_PROTOTYPE (newdecl)
	      = C_DECL_BUILTIN_PROTOTYPE (olddecl);
	}

      /* Preserve function specific target and optimization options */
      if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl)
	  && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl))
	DECL_FUNCTION_SPECIFIC_TARGET (newdecl)
	  = DECL_FUNCTION_SPECIFIC_TARGET (olddecl);

      if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl)
	  && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl))
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)
	  = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl);

      /* Also preserve various other info from the definition.  */
      if (!new_is_definition)
	{
	  tree t;
	  DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
	  DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
	  DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl);
	  DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl);
	  DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl));
	  /* The copied argument list must belong to NEWDECL.  */
	  for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t))
	    DECL_CONTEXT (t) = newdecl;

	  /* See if we've got a function to instantiate from.  */
	  if (DECL_SAVED_TREE (olddecl))
	    DECL_ABSTRACT_ORIGIN (newdecl)
	      = DECL_ABSTRACT_ORIGIN (olddecl);
	}
    }

  /* Merge the USED information.  */
  if (TREE_USED (olddecl))
    TREE_USED (newdecl) = 1;
  else if (TREE_USED (newdecl))
    TREE_USED (olddecl) = 1;
  if (VAR_P (olddecl) || TREE_CODE (olddecl) == PARM_DECL)
    DECL_READ_P (newdecl) |= DECL_READ_P (olddecl);
  if (DECL_PRESERVE_P (olddecl))
    DECL_PRESERVE_P (newdecl) = 1;
  else if (DECL_PRESERVE_P (newdecl))
    DECL_PRESERVE_P (olddecl) = 1;

  /* Merge DECL_COMMON */
  if (VAR_P (olddecl) && VAR_P (newdecl)
      && !lookup_attribute ("common", DECL_ATTRIBUTES (newdecl))
      && !lookup_attribute ("nocommon", DECL_ATTRIBUTES (newdecl)))
    DECL_COMMON (newdecl) = DECL_COMMON (newdecl) && DECL_COMMON (olddecl);

  /* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
     But preserve OLDDECL's DECL_UID, DECL_CONTEXT and
     DECL_ARGUMENTS (if appropriate).  */
  {
    unsigned olddecl_uid = DECL_UID (olddecl);
    tree olddecl_context = DECL_CONTEXT (olddecl);
    tree olddecl_arguments = NULL;
    if (TREE_CODE (olddecl) == FUNCTION_DECL)
      olddecl_arguments = DECL_ARGUMENTS (olddecl);

    /* First splice over the tree_decl_common part (everything past
       tree_common), then the code-specific tail below.  */
    memcpy ((char *) olddecl + sizeof (struct tree_common),
	    (char *) newdecl + sizeof (struct tree_common),
	    sizeof (struct tree_decl_common) - sizeof (struct tree_common));
    DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
    switch (TREE_CODE (olddecl))
      {
      case FUNCTION_DECL:
      case VAR_DECL:
	{
	  /* The symtab node must survive the raw copy; save and
	     restore it around the memcpy.  */
	  struct symtab_node *snode = olddecl->decl_with_vis.symtab_node;

	  memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		  (char *) newdecl + sizeof (struct tree_decl_common),
		  tree_code_size (TREE_CODE (olddecl))
		  - sizeof (struct tree_decl_common));
	  olddecl->decl_with_vis.symtab_node = snode;

	  if ((DECL_EXTERNAL (olddecl)
	       || TREE_PUBLIC (olddecl)
	       || TREE_STATIC (olddecl))
	      && DECL_SECTION_NAME (newdecl) != NULL)
	    set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl));

	  /* This isn't quite correct for something like
		int __thread x attribute ((tls_model ("local-exec")));
		extern int __thread x;
	     as we'll lose the "local-exec" model.  */
	  if (VAR_P (olddecl) && DECL_THREAD_LOCAL_P (newdecl))
	    set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl));
	  break;
	}

      case FIELD_DECL:
      case PARM_DECL:
      case LABEL_DECL:
      case RESULT_DECL:
      case CONST_DECL:
      case TYPE_DECL:
	memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		(char *) newdecl + sizeof (struct tree_decl_common),
		tree_code_size (TREE_CODE (olddecl))
		- sizeof (struct tree_decl_common));
	break;

      default:
	memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
		(char *) newdecl + sizeof (struct tree_decl_common),
		sizeof (struct tree_decl_non_common)
		- sizeof (struct tree_decl_common));
      }
    /* Restore the fields that must not be clobbered by the copy.  */
    DECL_UID (olddecl) = olddecl_uid;
    DECL_CONTEXT (olddecl) = olddecl_context;
    if (TREE_CODE (olddecl) == FUNCTION_DECL)
      DECL_ARGUMENTS (olddecl) = olddecl_arguments;
  }

  /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl
     so that encode_section_info has a chance to look at the new decl
     flags and attributes.  */
  if (DECL_RTL_SET_P (olddecl)
      && (TREE_CODE (olddecl) == FUNCTION_DECL
	  || (VAR_P (olddecl) && TREE_STATIC (olddecl))))
    make_decl_rtl (olddecl);
}

/* Handle when a new declaration NEWDECL has the same name as an old
   one OLDDECL in the same binding contour.  Prints an error message
   if appropriate.

   If safely possible, alter OLDDECL to look like NEWDECL, and return
   true.  Otherwise, return false.  */

static bool
duplicate_decls (tree newdecl, tree olddecl)
{
  tree newtype = NULL, oldtype = NULL;

  if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype))
    {
      /* Avoid `unused variable' and other warnings for OLDDECL.  */
      TREE_NO_WARNING (olddecl) = 1;
      return false;
    }

  merge_decls (newdecl, olddecl, newtype, oldtype);

  /* The NEWDECL will no longer be needed.

     Before releasing the node, be sure to remove the function from the
     symbol table, where it might have been inserted to record a comdat
     group.  Take care, however, not to free DECL_STRUCT_FUNCTION,
     because that structure is shared between NEWDECL and OLDDECL.
*/
  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    DECL_STRUCT_FUNCTION (newdecl) = NULL;
  if (VAR_OR_FUNCTION_DECL_P (newdecl))
    {
      /* Drop any symbol-table node recorded for NEWDECL before
	 returning the tree node to the GC.  */
      struct symtab_node *snode = symtab_node::get (newdecl);
      if (snode)
	snode->remove ();
    }
  ggc_free (newdecl);
  return true;
}

/* Check whether decl-node NEW_DECL shadows an existing declaration.  */
static void
warn_if_shadowing (tree new_decl)
{
  struct c_binding *b;

  /* Shadow warnings wanted?  */
  if (!(warn_shadow
	|| warn_shadow_local
	|| warn_shadow_compatible_local)
      /* No shadow warnings for internally generated vars.  */
      || DECL_IS_BUILTIN (new_decl))
    return;

  /* Is anything being shadowed?  Invisible decls do not count.  */
  for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed)
    if (b->decl && b->decl != new_decl && !b->invisible
	&& (b->decl == error_mark_node
	    || diagnostic_report_warnings_p (global_dc,
					     DECL_SOURCE_LOCATION (b->decl))))
      {
	tree old_decl = b->decl;

	if (old_decl == error_mark_node)
	  {
	    warning (OPT_Wshadow, "declaration of %q+D shadows previous "
		     "non-variable", new_decl);
	    break;
	  }

	bool warned = false;
	auto_diagnostic_group d;
	if (TREE_CODE (old_decl) == PARM_DECL)
	  {
	    enum opt_code warning_code;

	    /* If '-Wshadow=compatible-local' is specified without other
	       -Wshadow= flags, we will warn only when the types of the
	       shadowing variable (i.e. new_decl) and the shadowed variable
	       (old_decl) are compatible.  */
	    if (warn_shadow)
	      warning_code = OPT_Wshadow;
	    else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
	      warning_code = OPT_Wshadow_compatible_local;
	    else
	      warning_code = OPT_Wshadow_local;
	    warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
				 "declaration of %qD shadows a parameter",
				 new_decl);
	  }
	else if (DECL_FILE_SCOPE_P (old_decl))
	  {
	    /* Do not warn if a variable shadows a function, unless
	       the variable is a function or a pointer-to-function.
	       */
	    if (TREE_CODE (old_decl) == FUNCTION_DECL
		&& TREE_CODE (new_decl) != FUNCTION_DECL
		&& !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl)))
	      continue;

	    warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow,
				 "declaration of %qD shadows a global "
				 "declaration",
				 new_decl);
	  }
	else if (TREE_CODE (old_decl) == FUNCTION_DECL
		 && fndecl_built_in_p (old_decl))
	  {
	    warning (OPT_Wshadow, "declaration of %q+D shadows "
		     "a built-in function", new_decl);
	    break;
	  }
	else
	  {
	    enum opt_code warning_code;

	    /* If '-Wshadow=compatible-local' is specified without other
	       -Wshadow= flags, we will warn only when the types of the
	       shadowing variable (i.e. new_decl) and the shadowed variable
	       (old_decl) are compatible.  */
	    if (warn_shadow)
	      warning_code = OPT_Wshadow;
	    else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl)))
	      warning_code = OPT_Wshadow_compatible_local;
	    else
	      warning_code = OPT_Wshadow_local;
	    warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code,
				 "declaration of %qD shadows a previous local",
				 new_decl);
	  }

	if (warned)
	  inform (DECL_SOURCE_LOCATION (old_decl),
		  "shadowed declaration is here");

	break;
      }
}

/* Record a decl-node X as belonging to the current lexical scope.
   Check for errors (such as an incompatible declaration for the same
   name already seen in the same scope).

   Returns either X or an old decl for the same name.
   If an old decl is returned, it may have been smashed
   to agree with what X says.  */

tree
pushdecl (tree x)
{
  tree name = DECL_NAME (x);
  struct c_scope *scope = current_scope;
  struct c_binding *b;
  bool nested = false;
  location_t locus = DECL_SOURCE_LOCATION (x);

  /* Must set DECL_CONTEXT for everything not at file scope or
     DECL_FILE_SCOPE_P won't work.  Local externs don't count
     unless they have initializers (which generate code).  */
  if (current_function_decl
      && (!VAR_OR_FUNCTION_DECL_P (x)
	  || DECL_INITIAL (x) || !TREE_PUBLIC (x)))
    DECL_CONTEXT (x) = current_function_decl;

  /* Anonymous decls are just inserted in the scope.  */
  if (!name)
    {
      bind (name, x, scope, /*invisible=*/false, /*nested=*/false,
	    locus);
      return x;
    }

  /* First, see if there is another declaration with the same name in
     the current scope.  If there is, duplicate_decls may do all the
     work for us.  If duplicate_decls returns false, that indicates
     two incompatible decls in the same scope; we are to silently
     replace the old one (duplicate_decls has issued all appropriate
     diagnostics).  In particular, we should not consider possible
     duplicates in the external scope, or shadowing.  */
  b = I_SYMBOL_BINDING (name);
  if (b && B_IN_SCOPE (b, scope))
    {
      struct c_binding *b_ext, *b_use;
      tree type = TREE_TYPE (x);
      tree visdecl = b->decl;
      tree vistype = TREE_TYPE (visdecl);
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && COMPLETE_TYPE_P (TREE_TYPE (x)))
	b->inner_comp = false;
      b_use = b;
      b_ext = b;
      /* If this is an external linkage declaration, we should check
	 for compatibility with the type in the external scope before
	 setting the type at this scope based on the visible
	 information only.  */
      if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl))
	{
	  /* Walk down the shadow chain to the external-scope binding,
	     if there is one, and prefer it for the duplicate check.  */
	  while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
	    b_ext = b_ext->shadowed;
	  if (b_ext)
	    {
	      b_use = b_ext;
	      if (b_use->u.type)
		TREE_TYPE (b_use->decl) = b_use->u.type;
	    }
	}
      if (duplicate_decls (x, b_use->decl))
	{
	  if (b_use != b)
	    {
	      /* Save the updated type in the external scope and
		 restore the proper type for this scope.  */
	      tree thistype;
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b_use->decl);
	      b_use->u.type = TREE_TYPE (b_use->decl);
	      if (TREE_CODE (b_use->decl) == FUNCTION_DECL
		  && fndecl_built_in_p (b_use->decl))
		thistype
		  = build_type_attribute_variant (thistype,
						  TYPE_ATTRIBUTES
						  (b_use->u.type));
	      TREE_TYPE (b_use->decl) = thistype;
	    }
	  return b_use->decl;
	}
      else
	goto skip_external_and_shadow_checks;
    }

  /* All declarations with external linkage, and all external
     references, go in the external scope, no matter what scope is
     current.  However, the binding in that scope is ignored for
     purposes of normal name lookup.  A separate binding structure is
     created in the requested scope; this governs the normal
     visibility of the symbol.

     The binding in the externals scope is used exclusively for
     detecting duplicate declarations of the same object, no matter
     what scope they are in; this is what we do here.  (C99 6.2.7p2:
     All declarations that refer to the same object or function shall
     have compatible type; otherwise, the behavior is undefined.)

     However, in Objective-C, we also want to detect declarations
     conflicting with those of the basic types.  */
  if ((DECL_EXTERNAL (x) || scope == file_scope)
      && (VAR_OR_FUNCTION_DECL_P (x) || c_dialect_objc ()))
    {
      tree type = TREE_TYPE (x);
      tree vistype = NULL_TREE;
      tree visdecl = NULL_TREE;
      bool type_saved = false;
      if (b && !B_IN_EXTERNAL_SCOPE (b)
	  && VAR_OR_FUNCTION_DECL_P (b->decl)
	  && DECL_FILE_SCOPE_P (b->decl))
	{
	  visdecl = b->decl;
	  vistype = TREE_TYPE (visdecl);
	}
      if (scope != file_scope
	  && !DECL_IN_SYSTEM_HEADER (x))
	warning_at (locus, OPT_Wnested_externs,
		    "nested extern declaration of %qD", x);

      while (b && !B_IN_EXTERNAL_SCOPE (b))
	{
	  /* If this decl might be modified, save its type.  This is
	     done here rather than when the decl is first bound
	     because the type may change after first binding, through
	     being completed or through attributes being added.  If we
	     encounter multiple such decls, only the first should have
	     its type saved; the others will already have had their
	     proper types saved and the types will not have changed as
	     their scopes will not have been re-entered.  */
	  if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved)
	    {
	      b->u.type = TREE_TYPE (b->decl);
	      type_saved = true;
	    }
	  if (B_IN_FILE_SCOPE (b)
	      && VAR_P (b->decl)
	      && TREE_STATIC (b->decl)
	      && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE
	      && !TYPE_DOMAIN (TREE_TYPE (b->decl))
	      && TREE_CODE (type) == ARRAY_TYPE
	      && TYPE_DOMAIN (type)
	      && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	      && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
	    {
	      /* Array type completed in inner scope, which should be
		 diagnosed if the completion does not have size 1 and
		 it does not get completed in the file scope.  */
	      b->inner_comp = true;
	    }
	  b = b->shadowed;
	}

      /* If a matching external declaration has been found, set its
	 type to the composite of all the types of that declaration.
	 After the consistency checks, it will be reset to the
	 composite of the visible types only.  */
      if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && b->u.type)
	TREE_TYPE (b->decl) = b->u.type;

      /* The point of the same_translation_unit_p check here is,
	 we want to detect a duplicate decl for a construct like
	 foo() { extern bar(); } ... static bar();  but not if they are in
	 different translation units.  In any case, the static does
	 not go in the externals scope.  */
      if (b
	  && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && duplicate_decls (x, b->decl))
	{
	  tree thistype;
	  if (vistype)
	    {
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b->decl);
	    }
	  else
	    thistype = type;
	  b->u.type = TREE_TYPE (b->decl);
	  if (TREE_CODE (b->decl) == FUNCTION_DECL
	      && fndecl_built_in_p (b->decl))
	    thistype
	      = build_type_attribute_variant (thistype,
					      TYPE_ATTRIBUTES (b->u.type));
	  TREE_TYPE (b->decl) = thistype;
	  bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true,
		locus);
	  return b->decl;
	}
      else if (TREE_PUBLIC (x))
	{
	  if (visdecl && !b && duplicate_decls (x, visdecl))
	    {
	      /* An external declaration at block scope referring to a
		 visible entity with internal linkage.  The composite
		 type will already be correct for this scope, so we
		 just need to fall through to make the declaration in
		 this scope.  */
	      nested = true;
	      x = visdecl;
	    }
	  else
	    {
	      /* Record an invisible binding in the external scope so
		 future declarations of the same name can be checked
		 against it.  */
	      bind (name, x, external_scope, /*invisible=*/true,
		    /*nested=*/false, locus);
	      nested = true;
	    }
	}
    }

  if (TREE_CODE (x) != PARM_DECL)
    warn_if_shadowing (x);

 skip_external_and_shadow_checks:
  if (TREE_CODE (x) == TYPE_DECL)
    {
      /* So this is a typedef, set its underlying type.  */
      set_underlying_type (x);

      /* If X is a typedef defined in the current function, record it
	 for the purpose of implementing the -Wunused-local-typedefs
	 warning.  */
      record_locally_defined_typedef (x);
    }

  bind (name, x, scope, /*invisible=*/false, nested, locus);

  /* If x's type is incomplete because it's based on a
     structure or union which has not yet been fully declared,
     attach it to that structure or union type, so we can go
     back and complete the variable declaration later, if the
     structure or union gets fully declared.

     If the input is erroneous, we can have error_mark in the type
     slot (e.g. "f(void a, ...)") - that doesn't count as an
     incomplete type.  */
  if (TREE_TYPE (x) != error_mark_node
      && !COMPLETE_TYPE_P (TREE_TYPE (x)))
    {
      tree element = TREE_TYPE (x);

      /* Strip any array layers to find the underlying element type.  */
      while (TREE_CODE (element) == ARRAY_TYPE)
	element = TREE_TYPE (element);
      element = TYPE_MAIN_VARIANT (element);

      if ((RECORD_OR_UNION_TYPE_P (element)
	   || TREE_CODE (element) == ENUMERAL_TYPE)
	  && (TREE_CODE (x) != TYPE_DECL
	      || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE)
	  && !COMPLETE_TYPE_P (element))
	C_TYPE_INCOMPLETE_VARS (element)
	  = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element));
    }
  return x;
}

/* Issue a warning about implicit function declaration.  ID is the
   function identifier, OLDDECL is a declaration of the function in a
   different scope, or NULL_TREE.
*/

static void
implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
  if (!warn_implicit_function_declaration)
    return;

  bool warned;
  auto_diagnostic_group d;
  name_hint hint;
  /* Only look for a spelling suggestion when there is no earlier
     declaration of the function to point at.  */
  if (!olddecl)
    hint = lookup_name_fuzzy (id, FUZZY_LOOKUP_FUNCTION_NAME, loc);

  /* In C99 and later the diagnostic is a pedwarn; otherwise a plain
     warning.  Either way, attach a fix-it hint when a close match
     was found.  */
  if (flag_isoc99)
    {
      if (const char *suggestion = hint.suggestion ())
	{
	  gcc_rich_location richloc (loc);
	  richloc.add_fixit_replace (suggestion);
	  warned = pedwarn (&richloc, OPT_Wimplicit_function_declaration,
			    "implicit declaration of function %qE;"
			    " did you mean %qs?",
			    id, suggestion);
	}
      else
	warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
			  "implicit declaration of function %qE", id);
    }
  else if (const char *suggestion = hint.suggestion ())
    {
      gcc_rich_location richloc (loc);
      richloc.add_fixit_replace (suggestion);
      warned = warning_at
	(&richloc, OPT_Wimplicit_function_declaration,
	 G_("implicit declaration of function %qE; did you mean %qs?"),
	 id, suggestion);
    }
  else
    warned = warning_at (loc, OPT_Wimplicit_function_declaration,
			 G_("implicit declaration of function %qE"), id);

  if (olddecl && warned)
    locate_old_decl (olddecl);

  /* If no diagnostic was actually emitted, keep any deferred
     suggestion machinery from issuing one later.  */
  if (!warned)
    hint.suppress ();
}

/* Return the name of the header file that declares built-in function
   FNDECL, or null if either we don't know or don't expect to see an
   explicit declaration.
*/

static const char *
header_for_builtin_fn (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return NULL;

  /* Map each known builtin function code to the standard header that
     declares it; the cases are grouped by header.  */
  switch (DECL_FUNCTION_CODE (fndecl))
    {
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ASINH):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_ATAN2):
    CASE_FLT_FN (BUILT_IN_CBRT):
    CASE_FLT_FN (BUILT_IN_CEIL):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_CEIL):
    CASE_FLT_FN (BUILT_IN_COPYSIGN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
    CASE_FLT_FN (BUILT_IN_COS):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_ERF):
    CASE_FLT_FN (BUILT_IN_ERFC):
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_FABS):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS):
    CASE_FLT_FN (BUILT_IN_FDIM):
    CASE_FLT_FN (BUILT_IN_FLOOR):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FLOOR):
    CASE_FLT_FN (BUILT_IN_FMA):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA):
    CASE_FLT_FN (BUILT_IN_FMAX):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMAX):
    CASE_FLT_FN (BUILT_IN_FMIN):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMIN):
    CASE_FLT_FN (BUILT_IN_FMOD):
    CASE_FLT_FN (BUILT_IN_FREXP):
    CASE_FLT_FN (BUILT_IN_HYPOT):
    CASE_FLT_FN (BUILT_IN_ILOGB):
    CASE_FLT_FN (BUILT_IN_LDEXP):
    CASE_FLT_FN (BUILT_IN_LGAMMA):
    CASE_FLT_FN (BUILT_IN_LLRINT):
    CASE_FLT_FN (BUILT_IN_LLROUND):
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOGB):
    CASE_FLT_FN (BUILT_IN_LRINT):
    CASE_FLT_FN (BUILT_IN_LROUND):
    CASE_FLT_FN (BUILT_IN_MODF):
    CASE_FLT_FN (BUILT_IN_NAN):
    CASE_FLT_FN (BUILT_IN_NEARBYINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_NEARBYINT):
    CASE_FLT_FN (BUILT_IN_NEXTAFTER):
    CASE_FLT_FN (BUILT_IN_NEXTTOWARD):
    CASE_FLT_FN (BUILT_IN_POW):
    CASE_FLT_FN (BUILT_IN_REMAINDER):
    CASE_FLT_FN (BUILT_IN_REMQUO):
    CASE_FLT_FN (BUILT_IN_RINT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_RINT):
    CASE_FLT_FN (BUILT_IN_ROUND):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_ROUND):
    CASE_FLT_FN (BUILT_IN_SCALBLN):
    CASE_FLT_FN (BUILT_IN_SCALBN):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SINH):
    CASE_FLT_FN (BUILT_IN_SINCOS):
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
    CASE_FLT_FN (BUILT_IN_TAN):
    CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TGAMMA):
    CASE_FLT_FN (BUILT_IN_TRUNC):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC):
    case BUILT_IN_ISINF:
    case BUILT_IN_ISNAN:
      return "<math.h>";
    CASE_FLT_FN (BUILT_IN_CABS):
    CASE_FLT_FN (BUILT_IN_CACOS):
    CASE_FLT_FN (BUILT_IN_CACOSH):
    CASE_FLT_FN (BUILT_IN_CARG):
    CASE_FLT_FN (BUILT_IN_CASIN):
    CASE_FLT_FN (BUILT_IN_CASINH):
    CASE_FLT_FN (BUILT_IN_CATAN):
    CASE_FLT_FN (BUILT_IN_CATANH):
    CASE_FLT_FN (BUILT_IN_CCOS):
    CASE_FLT_FN (BUILT_IN_CCOSH):
    CASE_FLT_FN (BUILT_IN_CEXP):
    CASE_FLT_FN (BUILT_IN_CIMAG):
    CASE_FLT_FN (BUILT_IN_CLOG):
    CASE_FLT_FN (BUILT_IN_CONJ):
    CASE_FLT_FN (BUILT_IN_CPOW):
    CASE_FLT_FN (BUILT_IN_CPROJ):
    CASE_FLT_FN (BUILT_IN_CREAL):
    CASE_FLT_FN (BUILT_IN_CSIN):
    CASE_FLT_FN (BUILT_IN_CSINH):
    CASE_FLT_FN (BUILT_IN_CSQRT):
    CASE_FLT_FN (BUILT_IN_CTAN):
    CASE_FLT_FN (BUILT_IN_CTANH):
      return "<complex.h>";
    case BUILT_IN_MEMCHR:
    case BUILT_IN_MEMCMP:
    case BUILT_IN_MEMCPY:
    case BUILT_IN_MEMMOVE:
    case BUILT_IN_MEMSET:
    case BUILT_IN_STRCAT:
    case BUILT_IN_STRCHR:
    case BUILT_IN_STRCMP:
    case BUILT_IN_STRCPY:
    case BUILT_IN_STRCSPN:
    case BUILT_IN_STRLEN:
    case BUILT_IN_STRNCAT:
    case BUILT_IN_STRNCMP:
    case BUILT_IN_STRNCPY:
    case BUILT_IN_STRPBRK:
    case BUILT_IN_STRRCHR:
    case BUILT_IN_STRSPN:
    case BUILT_IN_STRSTR:
      return "<string.h>";
    case BUILT_IN_FPRINTF:
    case BUILT_IN_PUTC:
    case BUILT_IN_FPUTC:
    case BUILT_IN_FPUTS:
    case BUILT_IN_FSCANF:
    case BUILT_IN_FWRITE:
    case BUILT_IN_PRINTF:
    case BUILT_IN_PUTCHAR:
    case BUILT_IN_PUTS:
    case BUILT_IN_SCANF:
    case BUILT_IN_SNPRINTF:
    case BUILT_IN_SPRINTF:
    case BUILT_IN_SSCANF:
    case BUILT_IN_VFPRINTF:
    case BUILT_IN_VFSCANF:
    case BUILT_IN_VPRINTF:
    case BUILT_IN_VSCANF:
    case BUILT_IN_VSNPRINTF:
    case BUILT_IN_VSPRINTF:
    case BUILT_IN_VSSCANF:
      return "<stdio.h>";
    case BUILT_IN_ISALNUM:
    case BUILT_IN_ISALPHA:
    case BUILT_IN_ISBLANK:
    case BUILT_IN_ISCNTRL:
    case BUILT_IN_ISDIGIT:
    case BUILT_IN_ISGRAPH:
    case BUILT_IN_ISLOWER:
    case BUILT_IN_ISPRINT:
    case BUILT_IN_ISPUNCT:
    case BUILT_IN_ISSPACE:
    case BUILT_IN_ISUPPER:
    case BUILT_IN_ISXDIGIT:
    case BUILT_IN_TOLOWER:
    case BUILT_IN_TOUPPER:
      return "<ctype.h>";
    case BUILT_IN_ISWALNUM:
    case BUILT_IN_ISWALPHA:
    case BUILT_IN_ISWBLANK:
    case BUILT_IN_ISWCNTRL:
    case BUILT_IN_ISWDIGIT:
    case BUILT_IN_ISWGRAPH:
    case BUILT_IN_ISWLOWER:
    case BUILT_IN_ISWPRINT:
    case BUILT_IN_ISWPUNCT:
    case BUILT_IN_ISWSPACE:
    case BUILT_IN_ISWUPPER:
    case BUILT_IN_ISWXDIGIT:
    case BUILT_IN_TOWLOWER:
    case BUILT_IN_TOWUPPER:
      return "<wctype.h>";
    case BUILT_IN_ABORT:
    case BUILT_IN_ABS:
    case BUILT_IN_CALLOC:
    case BUILT_IN_EXIT:
    case BUILT_IN_FREE:
    case BUILT_IN_LABS:
    case BUILT_IN_LLABS:
    case BUILT_IN_MALLOC:
    case BUILT_IN_REALLOC:
    case BUILT_IN__EXIT2:
    case BUILT_IN_ALIGNED_ALLOC:
      return "<stdlib.h>";
    case BUILT_IN_IMAXABS:
      return "<inttypes.h>";
    case BUILT_IN_STRFTIME:
      return "<time.h>";
    default:
      return NULL;
    }
}

/* Generate an implicit declaration for identifier FUNCTIONID at LOC as a
   function of type int ().  */

tree
implicitly_declare (location_t loc, tree functionid)
{
  struct c_binding *b;
  tree decl = NULL_TREE;
  tree asmspec_tree;

  /* Look for an existing binding for FUNCTIONID in the external scope.  */
  for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed)
    {
      if (B_IN_SCOPE (b, external_scope))
	{
	  decl = b->decl;
	  break;
	}
    }

  if (decl)
    {
      if (TREE_CODE (decl) != FUNCTION_DECL)
	return decl;

      /* FIXME: Objective-C has weird not-really-builtin functions
	 which are supposed to be visible automatically.  They wind up
	 in the external scope because they're pushed before the file
	 scope gets created.  Catch this here and rebind them into the
	 file scope.
*/ if (!fndecl_built_in_p (decl) && DECL_IS_BUILTIN (decl)) { bind (functionid, decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } else { tree newtype = default_function_type; if (b->u.type) TREE_TYPE (decl) = b->u.type; /* Implicit declaration of a function already declared (somehow) in a different scope, or as a built-in. If this is the first time this has happened, warn; then recycle the old declaration but with the new type. */ if (!C_DECL_IMPLICIT (decl)) { implicit_decl_warning (loc, functionid, decl); C_DECL_IMPLICIT (decl) = 1; } if (fndecl_built_in_p (decl)) { newtype = build_type_attribute_variant (newtype, TYPE_ATTRIBUTES (TREE_TYPE (decl))); if (!comptypes (newtype, TREE_TYPE (decl))) { bool warned = warning_at (loc, 0, "incompatible implicit " "declaration of built-in " "function %qD", decl); /* See if we can hint which header to include. */ const char *header = header_for_builtin_fn (decl); if (header != NULL && warned) { rich_location richloc (line_table, loc); maybe_add_include_fixit (&richloc, header, true); inform (&richloc, "include %qs or provide a declaration of %qD", header, decl); } newtype = TREE_TYPE (decl); } } else { if (!comptypes (newtype, TREE_TYPE (decl))) { auto_diagnostic_group d; error_at (loc, "incompatible implicit declaration of " "function %qD", decl); locate_old_decl (decl); } } b->u.type = TREE_TYPE (decl); TREE_TYPE (decl) = newtype; bind (functionid, decl, current_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } } /* Not seen before. 
*/ decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type); DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; C_DECL_IMPLICIT (decl) = 1; implicit_decl_warning (loc, functionid, 0); asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL); if (asmspec_tree) set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree)); /* C89 says implicit declarations are in the innermost block. So we record the decl in the standard fashion. */ decl = pushdecl (decl); /* No need to call objc_check_decl here - it's a function type. */ rest_of_decl_compilation (decl, 0, 0); /* Write a record describing this implicit function declaration to the prototypes file (if requested). */ gen_aux_info_record (decl, 0, 1, 0); /* Possibly apply some default attributes to this implicit declaration. */ decl_attributes (&decl, NULL_TREE, 0); return decl; } /* Issue an error message for a reference to an undeclared variable ID, including a reference to a builtin outside of function-call context. Establish a binding of the identifier to error_mark_node in an appropriate scope, which will suppress further errors for the same identifier. The error message should be given location LOC. 
*/

void
undeclared_variable (location_t loc, tree id)
{
  /* Reported-once note is emitted at most once per translation unit.  */
  static bool already = false;
  struct c_scope *scope;
  auto_diagnostic_group d;

  if (current_function_decl == NULL_TREE)
    {
      /* File-scope reference: try a spelling suggestion as a fix-it.  */
      name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
      if (const char *suggestion = guessed_id.suggestion ())
	{
	  gcc_rich_location richloc (loc);
	  richloc.add_fixit_replace (suggestion);
	  error_at (&richloc,
		    "%qE undeclared here (not in a function);"
		    " did you mean %qs?",
		    id, suggestion);
	}
      else
	error_at (loc, "%qE undeclared here (not in a function)", id);
      scope = current_scope;
    }
  else
    {
      /* Inside a function: let the Objective-C front end claim the
	 identifier as a private ivar first.  */
      if (!objc_diagnose_private_ivar (id))
	{
	  name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc);
	  if (const char *suggestion = guessed_id.suggestion ())
	    {
	      gcc_rich_location richloc (loc);
	      richloc.add_fixit_replace (suggestion);
	      error_at (&richloc,
			"%qE undeclared (first use in this function);"
			" did you mean %qs?",
			id, suggestion);
	    }
	  else
	    error_at (loc, "%qE undeclared (first use in this function)", id);
	}
      if (!already)
	{
	  inform (loc, "each undeclared identifier is reported only"
		  " once for each function it appears in");
	  already = true;
	}

      /* If we are parsing old-style parameter decls, current_function_decl
	 will be nonnull but current_function_scope will be null.  */
      scope = current_function_scope ? current_function_scope : current_scope;
    }

  /* Bind to error_mark_node so later uses stay silent.  */
  bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);
}

/* Subroutine of lookup_label, declare_label, define_label: construct a
   LABEL_DECL with all the proper frills.  Also create a struct
   c_label_vars initialized for the current scope.
*/

static tree
make_label (location_t location, tree name, bool defining,
	    struct c_label_vars **p_label_vars)
{
  tree label = build_decl (location, LABEL_DECL, name, void_type_node);
  DECL_CONTEXT (label) = current_function_decl;
  SET_DECL_MODE (label, VOIDmode);

  /* Allocate the bookkeeping structure in GC memory and return it to
     the caller through *P_LABEL_VARS.  */
  c_label_vars *label_vars = ggc_alloc<c_label_vars> ();
  label_vars->shadowed = NULL;
  set_spot_bindings (&label_vars->label_bindings, defining);
  label_vars->decls_in_scope = make_tree_vector ();
  label_vars->gotos = NULL;
  *p_label_vars = label_vars;

  return label;
}

/* Get the LABEL_DECL corresponding to identifier NAME as a label.
   Create one if none exists so far for the current function.
   This is called when a label is used in a goto expression or
   has its address taken.  */

tree
lookup_label (tree name)
{
  tree label;
  struct c_label_vars *label_vars;

  if (current_function_scope == 0)
    {
      error ("label %qE referenced outside of any function", name);
      return NULL_TREE;
    }

  /* Use a label already defined or ref'd with this name, but not if
     it is inherited from a containing function and wasn't declared
     using __label__.  */
  label = I_LABEL_DECL (name);
  if (label && (DECL_CONTEXT (label) == current_function_decl
		|| C_DECLARED_LABEL_FLAG (label)))
    {
      /* If the label has only been declared, update its apparent
	 location to point here, for better diagnostics if it
	 turns out not to have been defined.  */
      if (DECL_INITIAL (label) == NULL_TREE)
	DECL_SOURCE_LOCATION (label) = input_location;
      return label;
    }

  /* No label binding for that identifier; make one.  */
  label = make_label (input_location, name, false, &label_vars);

  /* Ordinary labels go in the current function scope.  */
  bind_label (name, label, current_function_scope, label_vars);

  return label;
}

/* Issue a warning about DECL for a goto statement at GOTO_LOC going
   to LABEL.
*/

static void
warn_about_goto (location_t goto_loc, tree label, tree decl)
{
  /* Jumping past a variably modified type is a hard error (C99
     constraint); skipping an ordinary initialization is only a
     -Wjump-misses-init warning.  */
  if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
    error_at (goto_loc,
	      "jump into scope of identifier with variably modified type");
  else
    warning_at (goto_loc, OPT_Wjump_misses_init,
		"jump skips variable initialization");
  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
  inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}

/* Look up a label because of a goto statement.  This is like
   lookup_label, but also issues any appropriate warnings.  */

tree
lookup_label_for_goto (location_t loc, tree name)
{
  tree label;
  struct c_label_vars *label_vars;
  unsigned int ix;
  tree decl;

  label = lookup_label (name);
  if (label == NULL_TREE)
    return NULL_TREE;

  /* If we are jumping to a different function, we can't issue any
     useful warnings.  */
  if (DECL_CONTEXT (label) != current_function_decl)
    {
      gcc_assert (C_DECLARED_LABEL_FLAG (label));
      return label;
    }

  label_vars = I_LABEL_BINDING (name)->u.label;

  /* If the label has not yet been defined, then push this goto on a
     list for possible later warnings.  */
  if (label_vars->label_bindings.scope == NULL)
    {
      c_goto_bindings *g = ggc_alloc<c_goto_bindings> ();
      g->loc = loc;
      set_spot_bindings (&g->goto_bindings, true);
      vec_safe_push (label_vars->gotos, g);
      return label;
    }

  /* If there are any decls in label_vars->decls_in_scope, then this
     goto has missed the declaration of the decl.  This happens for a
     case like
       int i = 1;
      lab:
       ...
       goto lab;
     Issue a warning or error.  */
  FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl)
    warn_about_goto (loc, label, decl);

  if (label_vars->label_bindings.left_stmt_expr)
    {
      error_at (loc, "jump into statement expression");
      inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
    }

  return label;
}

/* Make a label named NAME in the current function, shadowing silently
   any that may be inherited from containing functions or containing
   scopes.
   This is called for __label__ declarations.  */

tree
declare_label (tree name)
{
  struct c_binding *b = I_LABEL_BINDING (name);
  tree label;
  struct c_label_vars *label_vars;

  /* Check to make sure that the label hasn't already been declared
     at this scope */
  if (b && B_IN_CURRENT_SCOPE (b))
    {
      auto_diagnostic_group d;
      error ("duplicate label declaration %qE", name);
      locate_old_decl (b->decl);

      /* Just use the previous declaration.  */
      return b->decl;
    }

  label = make_label (input_location, name, false, &label_vars);
  C_DECLARED_LABEL_FLAG (label) = 1;

  /* Declared labels go in the current scope.  */
  bind_label (name, label, current_scope, label_vars);

  return label;
}

/* When we define a label, issue any appropriate warnings if there are
   any gotos earlier in the function which jump to this label.  */

static void
check_earlier_gotos (tree label, struct c_label_vars* label_vars)
{
  unsigned int ix;
  struct c_goto_bindings *g;

  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
    {
      struct c_binding *b;
      struct c_scope *scope;

      /* We have a goto to this label.  The goto is going forward.  In
	 g->scope, the goto is going to skip any binding which was
	 defined after g->bindings_in_scope.  */
      if (g->goto_bindings.scope->has_jump_unsafe_decl)
	{
	  for (b = g->goto_bindings.scope->bindings;
	       b != g->goto_bindings.bindings_in_scope;
	       b = b->prev)
	    {
	      if (decl_jump_unsafe (b->decl))
		warn_about_goto (g->loc, label, b->decl);
	    }
	}

      /* We also need to warn about decls defined in any scopes
	 between the scope of the label and the scope of the goto.  */
      for (scope = label_vars->label_bindings.scope;
	   scope != g->goto_bindings.scope;
	   scope = scope->outer)
	{
	  gcc_assert (scope != NULL);
	  if (scope->has_jump_unsafe_decl)
	    {
	      /* In the label's own scope, only bindings made after the
		 label itself are skipped by the goto.  */
	      if (scope == label_vars->label_bindings.scope)
		b = label_vars->label_bindings.bindings_in_scope;
	      else
		b = scope->bindings;
	      for (; b != NULL; b = b->prev)
		{
		  if (decl_jump_unsafe (b->decl))
		    warn_about_goto (g->loc, label, b->decl);
		}
	    }
	}

      if (g->goto_bindings.stmt_exprs > 0)
	{
	  error_at (g->loc, "jump into statement expression");
	  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here",
		  label);
	}
    }

  /* Now that the label is defined, we will issue warnings about
     subsequent gotos to this label when we see them.  */
  vec_safe_truncate (label_vars->gotos, 0);
  label_vars->gotos = NULL;
}

/* Define a label, specifying the location in the source file.
   Return the LABEL_DECL node for the label, if the definition
   is valid.  Otherwise return NULL_TREE.  */

tree
define_label (location_t location, tree name)
{
  /* Find any preexisting label with this name.  It is an error
     if that label has already been defined in this function, or
     if there is a containing function with a declared label with
     the same name.  */
  tree label = I_LABEL_DECL (name);

  if (label
      && ((DECL_CONTEXT (label) == current_function_decl
	   && DECL_INITIAL (label) != NULL_TREE)
	  || (DECL_CONTEXT (label) != current_function_decl
	      && C_DECLARED_LABEL_FLAG (label))))
    {
      auto_diagnostic_group d;
      error_at (location, "duplicate label %qD", label);
      locate_old_decl (label);
      return NULL_TREE;
    }
  else if (label && DECL_CONTEXT (label) == current_function_decl)
    {
      struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label;

      /* The label has been used or declared already in this function,
	 but not defined.  Update its location to point to this
	 definition.  */
      DECL_SOURCE_LOCATION (label) = location;
      set_spot_bindings (&label_vars->label_bindings, true);

      /* Issue warnings as required about any goto statements from
	 earlier in the function.  */
      check_earlier_gotos (label, label_vars);
    }
  else
    {
      struct c_label_vars *label_vars;

      /* No label binding for that identifier; make one.  */
      label = make_label (location, name, true, &label_vars);

      /* Ordinary labels go in the current function scope.  */
      bind_label (name, label, current_function_scope, label_vars);
    }

  if (!in_system_header_at (input_location) && lookup_name (name))
    warning_at (location, OPT_Wtraditional,
		"traditional C lacks a separate namespace "
		"for labels, identifier %qE conflicts", name);

  /* Mark label as having been defined.  */
  DECL_INITIAL (label) = error_mark_node;
  return label;
}

/* Get the bindings for a new switch statement.  This is used to issue
   warnings as appropriate for jumps from the switch to case or
   default labels.  */

struct c_spot_bindings *
c_get_switch_bindings (void)
{
  struct c_spot_bindings *switch_bindings;

  switch_bindings = XNEW (struct c_spot_bindings);
  set_spot_bindings (switch_bindings, true);
  return switch_bindings;
}

/* Release the bindings structure obtained from c_get_switch_bindings.  */

void
c_release_switch_bindings (struct c_spot_bindings *bindings)
{
  gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr);
  XDELETE (bindings);
}

/* This is called at the point of a case or default label to issue
   warnings about decls as needed.  It returns true if it found an
   error, not just a warning.
*/

bool
c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings,
			      location_t switch_loc, location_t case_loc)
{
  bool saw_error;
  struct c_scope *scope;

  saw_error = false;
  /* Walk every scope entered since the switch statement itself;
     any jump-unsafe decl in those scopes is skipped by the jump to
     this case/default label.  */
  for (scope = current_scope;
       scope != switch_bindings->scope;
       scope = scope->outer)
    {
      struct c_binding *b;

      gcc_assert (scope != NULL);

      if (!scope->has_jump_unsafe_decl)
	continue;

      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  if (decl_jump_unsafe (b->decl))
	    {
	      if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE))
		{
		  saw_error = true;
		  error_at (case_loc,
			    ("switch jumps into scope of identifier with "
			     "variably modified type"));
		}
	      else
		warning_at (case_loc, OPT_Wjump_misses_init,
			    "switch jumps over variable initialization");
	      inform (switch_loc, "switch starts here");
	      inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here",
		      b->decl);
	    }
	}
    }

  if (switch_bindings->stmt_exprs > 0)
    {
      saw_error = true;
      error_at (case_loc, "switch jumps into statement expression");
      inform (switch_loc, "switch starts here");
    }

  return saw_error;
}

/* Given NAME, an IDENTIFIER_NODE,
   return the structure (or union or enum) definition for that name.
   If THISLEVEL_ONLY is nonzero, searches only the current_scope.
   CODE says which kind of type the caller wants;
   it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
   If PLOC is not NULL and this returns non-null, it sets *PLOC to the
   location where the tag was defined.
   If the wrong kind of type is found, an error is reported.  */

static tree
lookup_tag (enum tree_code code, tree name, bool thislevel_only,
	    location_t *ploc)
{
  struct c_binding *b = I_TAG_BINDING (name);
  bool thislevel = false;

  if (!b || !b->decl)
    return NULL_TREE;

  /* We only care about whether it's in this level if
     thislevel_only was set or it might be a type clash.  */
  if (thislevel_only || TREE_CODE (b->decl) != code)
    {
      /* For our purposes, a tag in the external scope is the same as
	 a tag in the file scope.  (Primarily relevant to Objective-C
	 and its builtin structure tags, which get pushed before the
	 file scope is created.)  */
      if (B_IN_CURRENT_SCOPE (b)
	  || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
	thislevel = true;
    }

  if (thislevel_only && !thislevel)
    return NULL_TREE;

  if (TREE_CODE (b->decl) != code)
    {
      /* Definition isn't the kind we were looking for.  */
      pending_invalid_xref = name;
      pending_invalid_xref_location = input_location;

      /* If in the same binding level as a declaration as a tag
	 of a different type, this must not be allowed to
	 shadow that tag, so give the error immediately.
	 (For example, "struct foo; union foo;" is invalid.)  */
      if (thislevel)
	pending_xref_error ();
    }

  if (ploc != NULL)
    *ploc = b->locus;

  return b->decl;
}

/* Return true if a definition exists for NAME with code CODE.  */

bool
tag_exists_p (enum tree_code code, tree name)
{
  struct c_binding *b = I_TAG_BINDING (name);

  if (b == NULL || b->decl == NULL_TREE)
    return false;
  return TREE_CODE (b->decl) == code;
}

/* Print an error message now
   for a recent invalid struct, union or enum cross reference.
   We don't print them immediately because they are not invalid
   when used in the `struct foo;' construct for shadowing.  */

void
pending_xref_error (void)
{
  if (pending_invalid_xref != NULL_TREE)
    error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag",
	      pending_invalid_xref);
  pending_invalid_xref = NULL_TREE;
}

/* Look up NAME in the current scope and its superiors
   in the namespace of variables, functions and typedefs.
   Return a ..._DECL node of some kind representing its definition,
   or return NULL_TREE if it is undefined.  */

tree
lookup_name (tree name)
{
  struct c_binding *b = I_SYMBOL_BINDING (name);
  if (b && !b->invisible)
    {
      maybe_record_typedef_use (b->decl);
      return b->decl;
    }
  return NULL_TREE;
}

/* Similar to `lookup_name' but look only at the indicated scope.
*/

static tree
lookup_name_in_scope (tree name, struct c_scope *scope)
{
  struct c_binding *b;

  /* Walk the shadow chain and return the binding belonging to SCOPE,
     if any.  */
  for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed)
    if (B_IN_SCOPE (b, scope))
      return b->decl;
  return NULL_TREE;
}

/* Look for the closest match for NAME within the currently valid
   scopes.

   This finds the identifier with the lowest Levenshtein distance to
   NAME.  If there are multiple candidates with equal minimal distance,
   the first one found is returned.  Scopes are searched from innermost
   outwards, and within a scope in reverse order of declaration, thus
   benefiting candidates "near" to the current scope.

   The function also looks for similar macro names to NAME, since a
   misspelled macro name will not be expanded, and hence looks like an
   identifier to the C frontend.

   It also looks for start_typename keywords, to detect "singed" vs
   "signed" typos.

   Use LOC for any deferred diagnostics.  */

name_hint
lookup_name_fuzzy (tree name, enum lookup_name_fuzzy_kind kind, location_t loc)
{
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);

  /* First, try some well-known names in the C standard library, in case
     the user forgot a #include.  */
  const char *header_hint
    = get_c_stdlib_header_for_name (IDENTIFIER_POINTER (name));

  if (header_hint)
    return name_hint (NULL,
		      new suggest_missing_header (loc,
						  IDENTIFIER_POINTER (name),
						  header_hint));

  /* Only suggest names reserved for the implementation if NAME begins
     with an underscore.  */
  bool consider_implementation_names = (IDENTIFIER_POINTER (name)[0] == '_');

  best_match<tree, tree> bm (name);

  /* Look within currently valid scopes.  */
  for (c_scope *scope = current_scope; scope; scope = scope->outer)
    for (c_binding *binding = scope->bindings; binding; binding = binding->prev)
      {
	if (!binding->id || binding->invisible)
	  continue;
	if (binding->decl == error_mark_node)
	  continue;
	/* Don't use bindings from implicitly declared functions,
	   as they were likely misspellings themselves.  */
	if (TREE_CODE (binding->decl) == FUNCTION_DECL)
	  if (C_DECL_IMPLICIT (binding->decl))
	    continue;
	/* Don't suggest names that are reserved for use by the
	   implementation, unless NAME began with an underscore.  */
	if (!consider_implementation_names)
	  {
	    const char *suggestion_str = IDENTIFIER_POINTER (binding->id);
	    if (name_reserved_for_implementation_p (suggestion_str))
	      continue;
	  }
	/* Filter candidates by the kind of name the caller wants.  */
	switch (kind)
	  {
	  case FUZZY_LOOKUP_TYPENAME:
	    if (TREE_CODE (binding->decl) != TYPE_DECL)
	      continue;
	    break;
	  case FUZZY_LOOKUP_FUNCTION_NAME:
	    if (TREE_CODE (binding->decl) != FUNCTION_DECL)
	      {
		/* Allow function pointers.  */
		if ((VAR_P (binding->decl)
		     || TREE_CODE (binding->decl) == PARM_DECL)
		    && TREE_CODE (TREE_TYPE (binding->decl)) == POINTER_TYPE
		    && (TREE_CODE (TREE_TYPE (TREE_TYPE (binding->decl)))
			== FUNCTION_TYPE))
		  break;
		continue;
	      }
	    break;
	  default:
	    break;
	  }
	bm.consider (binding->id);
      }

  /* Consider macros: if the user misspelled a macro name e.g. "SOME_MACRO"
     as:
       x = SOME_OTHER_MACRO (y);
     then "SOME_OTHER_MACRO" will survive to the frontend and show up
     as a misspelled identifier.

     Use the best distance so far so that a candidate is only set if
     a macro is better than anything so far.  This allows early rejection
     (without calculating the edit distance) of macro names that must have
     distance >= bm.get_best_distance (), and means that we only get a
     non-NULL result for best_macro_match if it's better than any of
     the identifiers already checked, which avoids needless creation
     of identifiers for macro hashnodes.  */
  best_macro_match bmm (name, bm.get_best_distance (), parse_in);
  cpp_hashnode *best_macro = bmm.get_best_meaningful_candidate ();
  /* If a macro is the closest so far to NAME, use it, creating an
     identifier tree node for it.  */
  if (best_macro)
    {
      const char *id = (const char *)best_macro->ident.str;
      tree macro_as_identifier
	= get_identifier_with_length (id, best_macro->ident.len);
      bm.set_best_so_far (macro_as_identifier,
			  bmm.get_best_distance (),
			  bmm.get_best_candidate_length ());
    }

  /* Try the "start_typename" keywords to detect
     "singed" vs "signed" typos.  */
  if (kind == FUZZY_LOOKUP_TYPENAME)
    {
      for (unsigned i = 0; i < num_c_common_reswords; i++)
	{
	  const c_common_resword *resword = &c_common_reswords[i];
	  if (!c_keyword_starts_typename (resword->rid))
	    continue;
	  tree resword_identifier = ridpointers [resword->rid];
	  if (!resword_identifier)
	    continue;
	  gcc_assert (TREE_CODE (resword_identifier) == IDENTIFIER_NODE);
	  bm.consider (resword_identifier);
	}
    }

  tree best = bm.get_best_meaningful_candidate ();
  if (best)
    return name_hint (IDENTIFIER_POINTER (best), NULL);
  else
    return name_hint (NULL, NULL);
}

/* Table of supported standard (C2x) attributes.  */
const struct attribute_spec std_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "deprecated", 0, 1, false, false, false, false,
    handle_deprecated_attribute, NULL },
  { "fallthrough", 0, 0, false, false, false, false,
    handle_fallthrough_attribute, NULL },
  { "maybe_unused", 0, 0, false, false, false, false,
    handle_unused_attribute, NULL },
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};

/* Create the predefined scalar types of C,
   and some nodes representing standard constants (0, 1, (void *) 0).
   Initialize the global scope.
   Make definitions for built-in primitive functions.  */

void
c_init_decl_processing (void)
{
  location_t save_loc = input_location;

  /* Initialize reserved words for parser.  */
  c_parse_init ();

  register_scoped_attributes (std_attribute_table, NULL);

  current_function_decl = NULL_TREE;

  gcc_obstack_init (&parser_obstack);

  /* Make the externals scope.  */
  push_scope ();
  external_scope = current_scope;

  /* Declarations from c_common_nodes_and_builtins must not be associated
     with this input file, lest we get differences between using and not
     using preprocessed headers.  */
  input_location = BUILTINS_LOCATION;

  c_common_nodes_and_builtins ();

  /* In C, comparisons and TRUTH_* expressions have type int.  */
  truthvalue_type_node = integer_type_node;
  truthvalue_true_node = integer_one_node;
  truthvalue_false_node = integer_zero_node;

  /* Even in C99, which has a real boolean type.  */
  pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"),
			boolean_type_node));

  /* Restore the caller-visible location after builtin setup.  */
  input_location = save_loc;

  make_fname_decl = c_make_fname_decl;
  start_fname_decls ();
}

/* Create the VAR_DECL at LOC for __FUNCTION__ etc.  ID is the name to
   give the decl, NAME is the initialization string and TYPE_DEP
   indicates whether NAME depended on the type of the function.  As we
   don't yet implement delayed emission of static data, we mark the
   decl as emitted so it is not placed in the output.  Anything using
   it must therefore pull out the STRING_CST initializer directly.
   FIXME.  */

static tree
c_make_fname_decl (location_t loc, tree id, int type_dep)
{
  const char *name = fname_as_string (type_dep);
  tree decl, type, init;
  size_t length = strlen (name);

  /* Build a const char[length+1] type for the literal.  */
  type = build_array_type (char_type_node,
			   build_index_type (size_int (length)));
  type = c_build_qualified_type (type, TYPE_QUAL_CONST);

  decl = build_decl (loc, VAR_DECL, id, type);

  TREE_STATIC (decl) = 1;
  TREE_READONLY (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;

  init = build_string (length + 1, name);
  free (CONST_CAST (char *, name));
  TREE_TYPE (init) = type;
  DECL_INITIAL (decl) = init;

  TREE_USED (decl) = 1;

  if (current_function_decl
      /* For invalid programs like this:

	 void foo()
	 const char* p = __FUNCTION__;

	 the __FUNCTION__ is believed to appear in K&R style function
	 parameter declarator.  In that case we still don't have
	 function_scope.  */
      && current_function_scope)
    {
      DECL_CONTEXT (decl) = current_function_decl;
      bind (id, decl, current_function_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
    }

  finish_decl (decl, loc, init, NULL_TREE, NULL_TREE);

  return decl;
}

/* Register the builtin decl DECL: bind it invisibly in the external
   scope, and chain implementation-namespace names (leading "__" or
   "_X") onto visible_builtins so push_file_scope can expose them.  */

tree
c_builtin_function (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree   id = DECL_NAME (decl);

  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);

  /* Should never be called on a symbol with a preexisting meaning.  */
  gcc_assert (!I_SYMBOL_BINDING (id));

  bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false,
	UNKNOWN_LOCATION);

  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }

  return decl;
}

/* Like c_builtin_function, but the binding (if any) is made visible
   rather than invisible.  */

tree
c_builtin_function_ext_scope (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree   id = DECL_NAME (decl);

  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);

  if (external_scope)
    bind (id, decl, external_scope, /*invisible=*/false,
	  /*nested=*/false, UNKNOWN_LOCATION);

  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }

  return decl;
}

/* Implement LANG_HOOKS_SIMULATE_BUILTIN_FUNCTION_DECL.  */

tree
c_simulate_builtin_function_decl (tree decl)
{
  tree type = TREE_TYPE (decl);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
  return pushdecl (decl);
}

/* Warn about attributes in a context where they are unused
   (attribute-declarations, except for the "fallthrough" case, and
   attributes on statements.
*/

void
c_warn_unused_attributes (tree attrs)
{
  for (tree t = attrs; t != NULL_TREE; t = TREE_CHAIN (t))
    if (get_attribute_namespace (t) == NULL_TREE)
      /* The specifications of standard attributes mean this is a
	 constraint violation.  */
      pedwarn (input_location, OPT_Wattributes, "%qE attribute ignored",
	       get_attribute_name (t));
    else
      warning (OPT_Wattributes, "%qE attribute ignored",
	       get_attribute_name (t));
}

/* Warn for standard attributes being applied to a type that is not
   being defined, where that is a constraint violation, and return a
   list of attributes with them removed.  */

tree
c_warn_type_attributes (tree attrs)
{
  tree *attr_ptr = &attrs;
  while (*attr_ptr)
    if (get_attribute_namespace (*attr_ptr) == NULL_TREE)
      {
	pedwarn (input_location, OPT_Wattributes, "%qE attribute ignored",
		 get_attribute_name (*attr_ptr));
	/* Unlink the standard attribute from the list.  */
	*attr_ptr = TREE_CHAIN (*attr_ptr);
      }
    else
      attr_ptr = &TREE_CHAIN (*attr_ptr);
  return attrs;
}

/* Called when a declaration is seen that contains no names to declare.
   If its type is a reference to a structure, union or enum inherited
   from a containing scope, shadow that tag name for the current scope
   with a forward reference.
   If its type defines a new named structure or union
   or defines an enum, it is valid but we need not do anything here.
   Otherwise, it is an error.  */

void
shadow_tag (const struct c_declspecs *declspecs)
{
  shadow_tag_warned (declspecs, 0);
}

/* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning,
   but no pedwarn.  */
void
shadow_tag_warned (const struct c_declspecs *declspecs, int warned)
{
  bool found_tag = false;

  if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p)
    {
      tree value = declspecs->type;
      enum tree_code code = TREE_CODE (value);

      if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
	/* Used to test also that TYPE_SIZE (value) != 0.
	   That caused warning for `struct foo;' at top level in the file.  */
	{
	  tree name = TYPE_NAME (value);
	  tree t;

	  found_tag = true;

	  if (declspecs->restrict_p)
	    {
	      error ("invalid use of %<restrict%>");
	      warned = 1;
	    }

	  if (name == NULL_TREE)
	    {
	      if (warned != 1 && code != ENUMERAL_TYPE)
		/* Empty unnamed enum OK */
		{
		  pedwarn (input_location, 0,
			   "unnamed struct/union that defines no instances");
		  warned = 1;
		}
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->typespec_kind != ctsk_tagfirstref_attrs
		   && declspecs->storage_class != csc_none)
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with storage class specifier "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->typespec_kind != ctsk_tagfirstref_attrs
		   && (declspecs->const_p
		       || declspecs->volatile_p
		       || declspecs->atomic_p
		       || declspecs->restrict_p
		       || declspecs->address_space))
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with type qualifier "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->typespec_kind != ctsk_tagfirstref_attrs
		   && declspecs->alignas_p)
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with %<_Alignas%> "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else
	    {
	      /* Valid shadowing declaration: push a fresh forward
		 reference for the tag in the current scope if it is
		 not already bound here.  */
	      pending_invalid_xref = NULL_TREE;
	      t = lookup_tag (code, name, true, NULL);

	      if (t == NULL_TREE)
		{
		  t = make_node (code);
		  pushtag (input_location, name, t);
		}
	    }
	}
      else
	{
	  if (warned != 1 && !in_system_header_at (input_location))
	    {
	      pedwarn (input_location, 0,
		       "useless type name in empty declaration");
	      warned = 1;
	    }
	}
    }
  else if (warned != 1 && !in_system_header_at (input_location)
	   && declspecs->typedef_p)
    {
      pedwarn (input_location, 0, "useless type name in empty declaration");
      warned = 1;
    }

  pending_invalid_xref = NULL_TREE;

  if (declspecs->inline_p)
    {
      error ("%<inline%> in empty declaration");
      warned = 1;
    }

  if (declspecs->noreturn_p)
    {
      error ("%<_Noreturn%> in empty declaration");
      warned = 1;
    }

  if (current_scope == file_scope && declspecs->storage_class == csc_auto)
    {
      error ("%<auto%> in file-scope empty declaration");
      warned = 1;
    }

  if (current_scope == file_scope && declspecs->storage_class == csc_register)
    {
      error ("%<register%> in file-scope empty declaration");
      warned = 1;
    }

  if (!warned && !in_system_header_at (input_location)
      && declspecs->storage_class != csc_none)
    {
      warning (0, "useless storage class specifier in empty declaration");
      warned = 2;
    }

  if (!warned && !in_system_header_at (input_location) && declspecs->thread_p)
    {
      warning (0, "useless %qs in empty declaration",
	       declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
      warned = 2;
    }

  if (!warned
      && !in_system_header_at (input_location)
      && (declspecs->const_p
	  || declspecs->volatile_p
	  || declspecs->atomic_p
	  || declspecs->restrict_p
	  || declspecs->address_space))
    {
      warning (0, "useless type qualifier in empty declaration");
      warned = 2;
    }

  if (!warned && !in_system_header_at (input_location)
      && declspecs->alignas_p)
    {
      warning (0, "useless %<_Alignas%> in empty declaration");
      warned = 2;
    }

  if (found_tag
      && warned == 2
      && (declspecs->typespec_kind == ctsk_tagref_attrs
	  || declspecs->typespec_kind == ctsk_tagfirstref_attrs))
    {
      /* Standard attributes after the "struct" or "union" keyword are
	 only permitted when the contents of the type are defined, or
	 in the form "struct-or-union attribute-specifier-sequence
	 identifier;".  If the ';' was not present, attributes were
	 diagnosed in the parser.  Here, ensure that any other useless
	 elements of the declaration result in a pedwarn, not just a
	 warning.  Forward declarations of enum types are not part of
	 standard C, but handle them the same.  */
      pedwarn (input_location, 0,
	       "invalid use of attributes in empty declaration");
      warned = 1;
    }

  if (warned != 1)
    {
      if (declspecs->declspecs_seen_p && !declspecs->non_std_attrs_seen_p)
	/* An attribute declaration (but not a fallthrough attribute
	   declaration, which was handled separately); warn if there
	   are any attributes being ignored (but not if the attributes
	   were empty).  */
	c_warn_unused_attributes (declspecs->attrs);
      else if (!found_tag)
	pedwarn (input_location, 0, "empty declaration");
    }
}

/* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_*
   bits.  SPECS represents declaration specifiers that the grammar
   only permits to contain type qualifiers and attributes.  */

int
quals_from_declspecs (const struct c_declspecs *specs)
{
  int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0)
	       | (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0)
	       | (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0)
	       | (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0)
	       | (ENCODE_QUAL_ADDR_SPACE (specs->address_space)));
  /* The grammar guarantees SPECS contains nothing but qualifiers and
     attributes; assert that invariant.  */
  gcc_assert (!specs->type
	      && !specs->decl_attr
	      && specs->typespec_word == cts_none
	      && specs->storage_class == csc_none
	      && !specs->typedef_p
	      && !specs->explicit_signed_p
	      && !specs->deprecated_p
	      && !specs->long_p
	      && !specs->long_long_p
	      && !specs->short_p
	      && !specs->signed_p
	      && !specs->unsigned_p
	      && !specs->complex_p
	      && !specs->inline_p
	      && !specs->noreturn_p
	      && !specs->thread_p);
  return quals;
}

/* Construct an array declarator.  LOC is the location of the
   beginning of the array (usually the opening brace).  EXPR is the
   expression inside [], or NULL_TREE.  QUALS are the type qualifiers
   inside the [] (to be applied to the pointer to which a parameter
   array is converted).  STATIC_P is true if "static" is inside the
   [], false otherwise.  VLA_UNSPEC_P is true if the array is [*], a
   VLA of unspecified length which is nevertheless a complete type,
   false otherwise.  The field for the contained declarator is left to
   be filled in by set_array_declarator_inner.
*/

struct c_declarator *
build_array_declarator (location_t loc,
			tree expr, struct c_declspecs *quals, bool static_p,
			bool vla_unspec_p)
{
  /* Declarators are parser-lifetime objects; allocate on the parser
     obstack rather than with GC.  */
  struct c_declarator *declarator = XOBNEW (&parser_obstack,
					    struct c_declarator);
  declarator->id_loc = loc;
  declarator->kind = cdk_array;
  /* The contained declarator is filled in later by
     set_array_declarator_inner.  */
  declarator->declarator = 0;
  declarator->u.array.dimen = expr;
  if (quals)
    {
      declarator->u.array.attrs = quals->attrs;
      declarator->u.array.quals = quals_from_declspecs (quals);
    }
  else
    {
      declarator->u.array.attrs = NULL_TREE;
      declarator->u.array.quals = 0;
    }
  declarator->u.array.static_p = static_p;
  declarator->u.array.vla_unspec_p = vla_unspec_p;
  /* "static" and qualifiers inside [] are C99 features; pedwarn in
     C90 modes.  */
  if (static_p || quals != NULL)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "ISO C90 does not support %<static%> or type "
		 "qualifiers in parameter array declarators");
  if (vla_unspec_p)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "ISO C90 does not support %<[*]%> array declarators");
  if (vla_unspec_p)
    {
      if (!current_scope->parm_flag)
	{
	  /* C99 6.7.5.2p4 */
	  error_at (loc, "%<[*]%> not allowed in other than "
		    "function prototype scope");
	  declarator->u.array.vla_unspec_p = false;
	  return NULL;
	}
      current_scope->had_vla_unspec = true;
    }
  return declarator;
}

/* Set the contained declarator of an array declarator.  DECL is the
   declarator, as constructed by build_array_declarator; INNER is what
   appears on the left of the [].  */

struct c_declarator *
set_array_declarator_inner (struct c_declarator *decl,
			    struct c_declarator *inner)
{
  decl->declarator = inner;
  return decl;
}

/* INIT is a constructor that forms DECL's initializer.  If the final
   element initializes a flexible array field, add the size of that
   initializer to DECL's size.
*/

static void
add_flexible_array_elts_to_size (tree decl, tree init)
{
  tree elt, type;

  if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
    return;

  /* Only the last initializer element can initialize a flexible
     array member.  */
  elt = CONSTRUCTOR_ELTS (init)->last ().value;
  type = TREE_TYPE (elt);
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_SIZE (type) == NULL_TREE
      && TYPE_DOMAIN (type) != NULL_TREE
      && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)
    {
      complete_array_type (&type, elt, false);
      DECL_SIZE (decl)
	= size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type));
      DECL_SIZE_UNIT (decl)
	= size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl),
		      TYPE_SIZE_UNIT (type));
    }
}

/* Decode a "typename", such as "int **", returning a ..._TYPE node.
   Set *EXPR, if EXPR not NULL, to any expression to be evaluated
   before the type name, and set *EXPR_CONST_OPERANDS, if
   EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may
   appear in a constant expression.  */

tree
groktypename (struct c_type_name *type_name, tree *expr,
	      bool *expr_const_operands)
{
  tree type;
  tree attrs = type_name->specs->attrs;

  /* Clear the attrs before grokdeclarator so they are not applied
     twice; they are re-applied to the resulting type below.  */
  type_name->specs->attrs = NULL_TREE;

  type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME,
			 false, NULL, &attrs, expr, expr_const_operands,
			 DEPRECATED_NORMAL);

  /* Apply attributes.  */
  attrs = c_warn_type_attributes (attrs);
  decl_attributes (&type, attrs, 0);

  return type;
}

/* Wrapper for decl_attributes that adds some implicit attributes
   to VAR_DECLs or FUNCTION_DECLs.  */

static tree
c_decl_attributes (tree *node, tree attributes, int flags)
{
  /* Add implicit "omp declare target" attribute if requested.  */
  if (current_omp_declare_target_attribute
      && ((VAR_P (*node) && is_global_var (*node))
	  || TREE_CODE (*node) == FUNCTION_DECL))
    {
      if (VAR_P (*node)
	  && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node)))
	attributes = tree_cons (get_identifier ("omp declare target implicit"),
				NULL_TREE, attributes);
      else
	{
	  attributes = tree_cons (get_identifier ("omp declare target"),
				  NULL_TREE, attributes);
	  attributes = tree_cons (get_identifier ("omp declare target block"),
				  NULL_TREE, attributes);
	}
    }

  /* Look up the current declaration with all the attributes merged
     so far so that attributes on the current declaration that's
     about to be pushed that conflict with the former can be detected,
     diagnosed, and rejected as appropriate.  */
  tree last_decl = lookup_name (DECL_NAME (*node));
  if (!last_decl)
    last_decl = lookup_name_in_scope (DECL_NAME (*node), external_scope);

  return decl_attributes (node, attributes, flags, last_decl);
}


/* Decode a declarator in an ordinary declaration or data definition.
   This is called as soon as the type information and variable name
   have been parsed, before parsing the initializer if any.
   Here we create the ..._DECL node, fill in its type,
   and put it on the list of decls for the current context.
   The ..._DECL node is returned as the value.

   Exception: for arrays where the length is not specified,
   the type is left null, to be filled in by `finish_decl'.

   Function definitions do not come here; they go to start_function
   instead.  However, external and forward declarations of functions
   do go through here.  Structure field declarations are done by
   grokfield and not through here.  */

tree
start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs,
	    bool initialized, tree attributes)
{
  tree decl;
  tree tem;
  tree expr = NULL_TREE;
  enum deprecated_states deprecated_state = DEPRECATED_NORMAL;

  /* An object declared as __attribute__((deprecated)) suppresses
     warnings of uses of other deprecated items.  */
  if (lookup_attribute ("deprecated", attributes))
    deprecated_state = DEPRECATED_SUPPRESS;

  decl = grokdeclarator (declarator, declspecs,
			 NORMAL, initialized, NULL, &attributes, &expr, NULL,
			 deprecated_state);
  if (!decl || decl == error_mark_node)
    return NULL_TREE;

  if (expr)
    add_stmt (fold_convert (void_type_node, expr));

  if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl))
      && TREE_PUBLIC (decl))
    warning (OPT_Wmain, "%q+D is usually a function", decl);

  if (initialized)
    /* Is it valid for this decl to have an initializer at all?
       If not, set INITIALIZED to zero, which will indirectly
       tell 'finish_decl' to ignore the initializer once it is parsed.  */
    switch (TREE_CODE (decl))
      {
      case TYPE_DECL:
	error ("typedef %qD is initialized (use %<__typeof__%> instead)",
	       decl);
	initialized = false;
	break;

      case FUNCTION_DECL:
	error ("function %qD is initialized like a variable", decl);
	initialized = false;
	break;

      case PARM_DECL:
	/* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE.  */
	error ("parameter %qD is initialized", decl);
	initialized = false;
	break;

      default:
	/* Don't allow initializations for incomplete types except for
	   arrays which might be completed by the initialization.  */

	/* This can happen if the array size is an undefined macro.
	   We already gave a warning, so we don't need another one.  */
	if (TREE_TYPE (decl) == error_mark_node)
	  initialized = false;
	else if (COMPLETE_TYPE_P (TREE_TYPE (decl)))
	  {
	    /* A complete type is ok if size is fixed.  */
	    if (!poly_int_tree_p (TYPE_SIZE (TREE_TYPE (decl)))
		|| C_DECL_VARIABLE_SIZE (decl))
	      {
		error ("variable-sized object may not be initialized");
		initialized = false;
	      }
	  }
	else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
	  {
	    error ("variable %qD has initializer but incomplete type", decl);
	    initialized = false;
	  }
	else if (C_DECL_VARIABLE_SIZE (decl))
	  {
	    /* Although C99 is unclear about whether incomplete arrays
	       of VLAs themselves count as VLAs, it does not make
	       sense to permit them to be initialized given that
	       ordinary VLAs may not be initialized.  */
	    error ("variable-sized object may not be initialized");
	    initialized = false;
	  }
      }

  if (initialized)
    {
      if (current_scope == file_scope)
	TREE_STATIC (decl) = 1;

      /* Tell 'pushdecl' this is an initialized decl
	 even though we don't yet have the initializer expression.
	 Also tell 'finish_decl' it may store the real initializer.  */
      DECL_INITIAL (decl) = error_mark_node;
    }

  /* If this is a function declaration, write a record describing it to the
     prototypes file (if requested).  */

  if (TREE_CODE (decl) == FUNCTION_DECL)
    gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl)));

  /* ANSI specifies that a tentative definition which is not merged with
     a non-tentative definition behaves exactly like a definition with an
     initializer equal to zero.  (Section 3.7.2)

     -fno-common gives strict ANSI behavior, though this tends to break
     a large body of code that grew up without this rule.

     Thread-local variables are never common, since there's no entrenched
     body of code to break, and it allows more efficient variable references
     in the presence of dynamic linking.  */

  if (VAR_P (decl)
      && !initialized
      && TREE_PUBLIC (decl)
      && !DECL_THREAD_LOCAL_P (decl)
      && !flag_no_common)
    DECL_COMMON (decl) = 1;

  /* Set attributes here so if duplicate decl, will have proper attributes.  */
  c_decl_attributes (&decl, attributes, 0);

  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl))
	  || current_function_decl))
    {
      if (declspecs->storage_class == csc_auto && current_scope != file_scope)
	;
      else if (declspecs->storage_class != csc_static)
	DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl);
    }

  if (TREE_CODE (decl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (decl)))
    {
      struct c_declarator *ce = declarator;

      if (ce->kind == cdk_pointer)
	ce = declarator->declarator;
      if (ce->kind == cdk_function)
	{
	  tree args = ce->u.arg_info->parms;
	  for (; args; args = DECL_CHAIN (args))
	    {
	      tree type = TREE_TYPE (args);
	      if (type && INTEGRAL_TYPE_P (type)
		  && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
		DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	    }
	}
    }

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && DECL_UNINLINABLE (decl)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
    warning (OPT_Wattributes, "inline function %q+D given attribute %qs",
	     decl, "noinline");

  /* C99 6.7.4p3: An inline definition of a function with external
     linkage shall not contain a definition of a modifiable object
     with static storage duration...  */
  if (VAR_P (decl)
      && current_scope != file_scope
      && TREE_STATIC (decl)
      && !TREE_READONLY (decl)
      && DECL_DECLARED_INLINE_P (current_function_decl)
      && DECL_EXTERNAL (current_function_decl))
    record_inline_static (input_location, current_function_decl,
			  decl, csi_modifiable);

  if (c_dialect_objc ()
      && VAR_OR_FUNCTION_DECL_P (decl))
      objc_check_global_decl (decl);

  /* Add this decl to the current scope.
     TEM may equal DECL or it may be a previous decl of the same name.  */
  tem = pushdecl (decl);

  if (initialized && DECL_EXTERNAL (tem))
    {
      DECL_EXTERNAL (tem) = 0;
      TREE_STATIC (tem) = 1;
    }

  return tem;
}

/* Subroutine of finish_decl.  TYPE is the type of an uninitialized object
   DECL or the non-array element type if DECL is an uninitialized array.
   If that type has a const member, diagnose this.  */

static void
diagnose_uninitialized_cst_member (tree decl, tree type)
{
  tree field;
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      tree field_type;
      if (TREE_CODE (field) != FIELD_DECL)
	continue;
      field_type = strip_array_types (TREE_TYPE (field));

      if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST)
      	{
	  warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
	  	      "uninitialized const member in %qT is invalid in C++",
		      strip_array_types (TREE_TYPE (decl)));
	  inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field);
	}

      /* Recurse into nested aggregates looking for const members.  */
      if (RECORD_OR_UNION_TYPE_P (field_type))
	diagnose_uninitialized_cst_member (decl, field_type);
    }
}

/* Finish processing of a declaration;
   install its initial value.
   If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
   If the length of an array type is not known before,
   it must be determined now, from the initial value, or it is an error.

   INIT_LOC is the location of the initial value.  */

void
finish_decl (tree decl, location_t init_loc, tree init,
	     tree origtype, tree asmspec_tree)
{
  tree type;
  bool was_incomplete = (DECL_SIZE (decl) == NULL_TREE);
  const char *asmspec = 0;

  /* If a name was specified, get the string.  */
  if (VAR_OR_FUNCTION_DECL_P (decl)
      && DECL_FILE_SCOPE_P (decl))
    asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
  if (asmspec_tree)
    asmspec = TREE_STRING_POINTER (asmspec_tree);

  if (VAR_P (decl)
      && TREE_STATIC (decl)
      && global_bindings_p ())
    /* So decl is a global variable.  Record the types it uses
       so that we can decide later to emit debug info for them.  */
    record_types_used_by_current_var_decl (decl);

  /* If `start_decl' didn't like having an initialization, ignore it now.  */
  if (init != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE)
    init = NULL_TREE;

  /* Don't crash if parm is initialized.  */
  if (TREE_CODE (decl) == PARM_DECL)
    init = NULL_TREE;

  if (init)
    store_init_value (init_loc, decl, init, origtype);

  if (c_dialect_objc () && (VAR_OR_FUNCTION_DECL_P (decl)
			    || TREE_CODE (decl) == FIELD_DECL))
    objc_check_decl (decl);

  type = TREE_TYPE (decl);

  /* Deduce size of array from initialization, if not already known.
     This is only needed for an initialization in the current scope;
     it must not be done for a file-scope initialization of a
     declaration with external linkage, redeclared in an inner scope
     with the outer declaration shadowed in an intermediate scope.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE
      && TREE_CODE (decl) != TYPE_DECL
      && !(TREE_PUBLIC (decl) && current_scope != file_scope))
    {
      bool do_default
	= (TREE_STATIC (decl)
	   /* Even if pedantic, an external linkage array
	      may have incomplete type at first.  */
	   ? pedantic && !TREE_PUBLIC (decl)
	   : !DECL_EXTERNAL (decl));
      int failure
	= complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl),
			       do_default);

      /* Get the completed type made by complete_array_type.  */
      type = TREE_TYPE (decl);

      switch (failure)
	{
	case 1:
	  error ("initializer fails to determine size of %q+D", decl);
	  break;

	case 2:
	  if (do_default)
	    error ("array size missing in %q+D", decl);
	  break;

	case 3:
	  error ("zero or negative size array %q+D", decl);
	  break;

	case 0:
	  /* For global variables, update the copy of the type that
	     exists in the binding.  */
	  if (TREE_PUBLIC (decl))
	    {
	      struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl));
	      while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
		b_ext = b_ext->shadowed;
	      if (b_ext && TREE_CODE (decl) == TREE_CODE (b_ext->decl))
		{
		  if (b_ext->u.type && comptypes (b_ext->u.type, type))
		    b_ext->u.type = composite_type (b_ext->u.type, type);
		  else
		    b_ext->u.type = type;
		}
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (DECL_INITIAL (decl))
	TREE_TYPE (DECL_INITIAL (decl)) = type;

      relayout_decl (decl);
    }

  /* Look for braced array initializers for character arrays and
     recursively convert them into STRING_CSTs.  */
  if (tree init = DECL_INITIAL (decl))
    DECL_INITIAL (decl) = braced_lists_to_strings (type, init);

  if (VAR_P (decl))
    {
      if (init && TREE_CODE (init) == CONSTRUCTOR)
	add_flexible_array_elts_to_size (decl, init);

      complete_flexible_array_elts (DECL_INITIAL (decl));

      if (is_global_var (decl))
	{
	  type_context_kind context = (DECL_THREAD_LOCAL_P (decl)
				       ? TCTX_THREAD_STORAGE
				       : TCTX_STATIC_STORAGE);
	  if (!verify_type_context (input_location, context, TREE_TYPE (decl)))
	    TREE_TYPE (decl) = error_mark_node;
	}

      if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (decl)))
	layout_decl (decl, 0);

      if (DECL_SIZE (decl) == NULL_TREE
	  /* Don't give an error if we already gave one earlier.  */
	  && TREE_TYPE (decl) != error_mark_node
	  && (TREE_STATIC (decl)
	      /* A static variable with an incomplete type
		 is an error if it is initialized.
		 Also if it is not file scope.
		 Otherwise, let it through, but if it is not `extern'
		 then it may cause an error message later.  */
	      ? (DECL_INITIAL (decl) != NULL_TREE
		 || !DECL_FILE_SCOPE_P (decl))
	      /* An automatic variable with an incomplete type
		 is an error.  */
	      : !DECL_EXTERNAL (decl)))
	 {
	   error ("storage size of %q+D isn%'t known", decl);
	   TREE_TYPE (decl) = error_mark_node;
	 }

      if ((RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl))
	   || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
	  && DECL_SIZE (decl) == NULL_TREE
	  && TREE_STATIC (decl))
	incomplete_record_decls.safe_push (decl);

      if (is_global_var (decl)
	  && DECL_SIZE (decl) != NULL_TREE
	  && TREE_TYPE (decl) != error_mark_node)
	{
	  if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
	    constant_expression_warning (DECL_SIZE (decl));
	  else
	    {
	      error ("storage size of %q+D isn%'t constant", decl);
	      TREE_TYPE (decl) = error_mark_node;
	    }
	}

      if (TREE_USED (type))
	{
	  TREE_USED (decl) = 1;
	  DECL_READ_P (decl) = 1;
	}
    }

  /* If this is a function and an assembler name is specified, reset DECL_RTL
     so we can give it its new name.  Also, update builtin_decl if it
     was a normal built-in.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
    {
      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
	set_builtin_user_assembler_name (decl, asmspec);
      set_user_assembler_name (decl, asmspec);
    }

  /* If #pragma weak was used, mark the decl weak now.  */
  maybe_apply_pragma_weak (decl);

  /* Output the assembler code and/or RTL code for variables and functions,
     unless the type is an undefined structure or union.
     If not, it will get done when the type is completed.  */

  if (VAR_OR_FUNCTION_DECL_P (decl))
    {
      /* Determine the ELF visibility.  */
      if (TREE_PUBLIC (decl))
	c_determine_visibility (decl);

      /* This is a no-op in c-lang.c or something real in objc-act.c.  */
      if (c_dialect_objc ())
	objc_check_decl (decl);

      if (asmspec)
	{
	  /* If this is not a static variable, issue a warning.
	     It doesn't make any sense to give an ASMSPEC for an
	     ordinary, non-register local variable.  Historically,
	     GCC has accepted -- but ignored -- the ASMSPEC in
	     this case.  */
	  if (!DECL_FILE_SCOPE_P (decl)
	      && VAR_P (decl)
	      && !C_DECL_REGISTER (decl)
	      && !TREE_STATIC (decl))
	    warning (0, "ignoring %<asm%> specifier for non-static local "
		     "variable %q+D", decl);
	  else
	    set_user_assembler_name (decl, asmspec);
	}

      if (DECL_FILE_SCOPE_P (decl))
	{
	  if (DECL_INITIAL (decl) == NULL_TREE
	      || DECL_INITIAL (decl) == error_mark_node)
	    /* Don't output anything
	       when a tentative file-scope definition is seen.
	       But at end of compilation, do output code for them.  */
	    DECL_DEFER_OUTPUT (decl) = 1;
	  if (asmspec && VAR_P (decl) && C_DECL_REGISTER (decl))
	    DECL_HARD_REGISTER (decl) = 1;
	  rest_of_decl_compilation (decl, true, 0);
	}
      else
	{
	  /* In conjunction with an ASMSPEC, the `register'
	     keyword indicates that we should place the variable
	     in a particular register.  */
	  if (asmspec && C_DECL_REGISTER (decl))
	    {
	      DECL_HARD_REGISTER (decl) = 1;
	      /* This cannot be done for a structure with volatile
		 fields, on which DECL_REGISTER will have been
		 reset.  */
	      if (!DECL_REGISTER (decl))
		error ("cannot put object with volatile field into register");
	    }

	  if (TREE_CODE (decl) != FUNCTION_DECL)
	    {
	      /* If we're building a variable sized type, and we might be
		 reachable other than via the top of the current binding
		 level, then create a new BIND_EXPR so that we deallocate
		 the object at the right time.  */
	      /* Note that DECL_SIZE can be null due to errors.  */
	      if (DECL_SIZE (decl)
		  && !TREE_CONSTANT (DECL_SIZE (decl))
		  && STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
		{
		  tree bind;
		  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  add_stmt (bind);
		  BIND_EXPR_BODY (bind) = push_stmt_list ();
		}
	      add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl),
				    DECL_EXPR, decl));
	    }
	}

      if (!DECL_FILE_SCOPE_P (decl))
	{
	  /* Recompute the RTL of a local array now
	     if it used to be an incomplete type.  */
	  if (was_incomplete && !is_global_var (decl))
	    {
	      /* If we used it already as memory, it must stay in memory.  */
	      TREE_ADDRESSABLE (decl) = TREE_USED (decl);
	      /* If it's still incomplete now, no init will save it.  */
	      if (DECL_SIZE (decl) == NULL_TREE)
		DECL_INITIAL (decl) = NULL_TREE;
	    }
	}
    }

  if (TREE_CODE (decl) == TYPE_DECL)
    {
      if (!DECL_FILE_SCOPE_P (decl)
	  && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
	add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));

      rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0);
    }

  /* Install a cleanup (aka destructor) if one was given.  */
  if (VAR_P (decl) && !TREE_STATIC (decl))
    {
      tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
      if (attr)
	{
	  tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
	  tree cleanup_decl = lookup_name (cleanup_id);
	  tree cleanup;
	  vec<tree, va_gc> *v;

	  /* Build "cleanup(&decl)" for the destructor.  */
	  cleanup = build_unary_op (input_location, ADDR_EXPR, decl, false);
	  vec_alloc (v, 1);
	  v->quick_push (cleanup);
	  cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl),
					       vNULL, cleanup_decl, v, NULL);
	  vec_free (v);

	  /* Don't warn about decl unused; the cleanup uses it.  */
	  TREE_USED (decl) = 1;
	  TREE_USED (cleanup_decl) = 1;
	  DECL_READ_P (decl) = 1;

	  push_cleanup (decl, cleanup, false);
	}
    }

  if (warn_cxx_compat
      && VAR_P (decl)
      && !DECL_EXTERNAL (decl)
      && DECL_INITIAL (decl) == NULL_TREE)
    {
      type = strip_array_types (type);
      if (TREE_READONLY (decl))
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		    "uninitialized %<const %D%> is invalid in C++", decl);
      else if (RECORD_OR_UNION_TYPE_P (type)
	       && C_TYPE_FIELDS_READONLY (type))
	diagnose_uninitialized_cst_member (decl, type);
    }

  if (flag_openmp
      && VAR_P (decl)
      && lookup_attribute ("omp declare target implicit",
			   DECL_ATTRIBUTES (decl)))
    {
      DECL_ATTRIBUTES (decl)
	= remove_attribute ("omp declare target implicit",
			    DECL_ATTRIBUTES (decl));
      if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (decl)))
	error ("%q+D in declare target directive does not have mappable type",
	       decl);
      else if (!lookup_attribute ("omp declare target",
				  DECL_ATTRIBUTES (decl))
	       && !lookup_attribute ("omp declare target link",
				     DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl)
	  = tree_cons (get_identifier ("omp declare target"),
		       NULL_TREE, DECL_ATTRIBUTES (decl));
    }

  invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}

/* Given a parsed parameter declaration, decode it into a PARM_DECL.
   EXPR is NULL or a pointer to an expression that needs to be
   evaluated for the side effects of array size expressions in the
   parameters.  */

tree
grokparm (const struct c_parm *parm, tree *expr)
{
  tree attrs = parm->attrs;
  tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false,
			      NULL, &attrs, expr, NULL, DEPRECATED_NORMAL);

  decl_attributes (&decl, attrs, 0);

  return decl;
}

/* Given a parsed parameter declaration, decode it into a PARM_DECL
   and push that on the current scope.  EXPR is a pointer to an
   expression that needs to be evaluated for the side effects of array
   size expressions in the parameters.
*/

void
push_parm_decl (const struct c_parm *parm, tree *expr)
{
  tree attrs = parm->attrs;
  tree decl;

  decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL,
			 &attrs, expr, NULL, DEPRECATED_NORMAL);
  if (decl && DECL_P (decl))
    DECL_SOURCE_LOCATION (decl) = parm->loc;
  decl_attributes (&decl, attrs, 0);

  decl = pushdecl (decl);

  /* No initializer for a parameter; finish_decl just lays it out.  */
  finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE);
}

/* Mark all the parameter declarations to date as forward decls.
   Also diagnose use of this extension.  */

void
mark_forward_parm_decls (void)
{
  struct c_binding *b;

  if (pedantic && !current_scope->warned_forward_parm_decls)
    {
      pedwarn (input_location, OPT_Wpedantic,
	       "ISO C forbids forward parameter declarations");
      /* Only warn once per scope.  */
      current_scope->warned_forward_parm_decls = true;
    }

  for (b = current_scope->bindings; b; b = b->prev)
    if (TREE_CODE (b->decl) == PARM_DECL)
      TREE_ASM_WRITTEN (b->decl) = 1;
}

/* Build a COMPOUND_LITERAL_EXPR.  TYPE is the type given in the compound
   literal, which may be an incomplete array type completed by the
   initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound
   literal.  NON_CONST is true if the initializers contain something
   that cannot occur in a constant expression.  If ALIGNAS_ALIGN is nonzero,
   it is the (valid) alignment for this compound literal, as specified
   with _Alignas.  */

tree
build_compound_literal (location_t loc, tree type, tree init, bool non_const,
			unsigned int alignas_align)
{
  /* We do not use start_decl here because we have a type, not a declarator;
     and do not use finish_decl because the decl should be stored inside
     the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR.  */
  tree decl;
  tree complit;
  tree stmt;

  if (type == error_mark_node
      || init == error_mark_node)
    return error_mark_node;

  /* The literal is modelled as an anonymous, artificial VAR_DECL;
     static at file scope, automatic inside a function.  */
  decl = build_decl (loc, VAR_DECL, NULL_TREE, type);
  DECL_EXTERNAL (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  TREE_STATIC (decl) = (current_scope == file_scope);
  DECL_CONTEXT (decl) = current_function_decl;
  TREE_USED (decl) = 1;
  DECL_READ_P (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 1;
  C_DECL_COMPOUND_LITERAL_P (decl) = 1;
  TREE_TYPE (decl) = type;
  c_apply_type_quals_to_decl (TYPE_QUALS (strip_array_types (type)), decl);
  if (alignas_align)
    {
      SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT);
      DECL_USER_ALIGN (decl) = 1;
    }
  store_init_value (loc, decl, init, NULL_TREE);

  if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type))
    {
      int failure = complete_array_type (&TREE_TYPE (decl),
					 DECL_INITIAL (decl), true);
      /* If complete_array_type returns 3, it means that the
         initial value of the compound literal is empty.  Allow it.  */
      gcc_assert (failure == 0 || failure == 3);

      type = TREE_TYPE (decl);
      TREE_TYPE (DECL_INITIAL (decl)) = type;
    }

  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      c_incomplete_type_error (loc, NULL_TREE, type);
      return error_mark_node;
    }

  if (TREE_STATIC (decl)
      && !verify_type_context (loc, TCTX_STATIC_STORAGE, type))
    return error_mark_node;

  stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt);
  TREE_SIDE_EFFECTS (complit) = 1;

  layout_decl (decl, 0);

  if (TREE_STATIC (decl))
    {
      /* This decl needs a name for the assembler output.  */
      set_compound_literal_name (decl);
      DECL_DEFER_OUTPUT (decl) = 1;
      DECL_COMDAT (decl) = 1;
      pushdecl (decl);
      rest_of_decl_compilation (decl, 1, 0);
    }
  else if (current_function_decl && !current_scope->parm_flag)
    pushdecl (decl);

  if (non_const)
    {
      complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit);
      C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1;
    }

  return complit;
}

/* Check the type of a compound literal.
   Here we just check that it is valid for C++.  */

void
check_compound_literal_type (location_t loc, struct c_type_name *type_name)
{
  if (warn_cxx_compat
      && (type_name->specs->typespec_kind == ctsk_tagdef
          || type_name->specs->typespec_kind == ctsk_tagfirstref
	  || type_name->specs->typespec_kind == ctsk_tagfirstref_attrs))
    warning_at (loc, OPT_Wc___compat,
		"defining a type in a compound literal is invalid in C++");
}

/* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME,
   replacing with appropriate values if they are invalid.  */

static void
check_bitfield_type_and_width (location_t loc, tree *type, tree *width,
			       tree orig_name)
{
  tree type_mv;
  unsigned int max_width;
  unsigned HOST_WIDE_INT w;
  const char *name = (orig_name
		      ? identifier_to_locale (IDENTIFIER_POINTER (orig_name))
		      : _("<anonymous>"));

  /* Detect and ignore out of range field width and process valid
     field widths.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (*width)))
    {
      error_at (loc, "bit-field %qs width not an integer constant", name);
      /* Recover by pretending the width was 1.  */
      *width = integer_one_node;
    }
  else
    {
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  *width = c_fully_fold (*width, false, NULL);
	  if (TREE_CODE (*width) == INTEGER_CST)
	    pedwarn (loc, OPT_Wpedantic,
		     "bit-field %qs width not an integer constant expression",
		     name);
	}
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  error_at (loc, "bit-field %qs width not an integer constant", name);
	  *width = integer_one_node;
	}
      constant_expression_warning (*width);
      if (tree_int_cst_sgn (*width) < 0)
	{
	  error_at (loc, "negative width in bit-field %qs", name);
	  *width = integer_one_node;
	}
      else if (integer_zerop (*width) && orig_name)
	{
	  /* A zero width is only valid for unnamed bit-fields.  */
	  error_at (loc, "zero width for bit-field %qs", name);
	  *width = integer_one_node;
	}
    }

  /* Detect invalid bit-field type.  */
  if (TREE_CODE (*type) != INTEGER_TYPE
      && TREE_CODE (*type) != BOOLEAN_TYPE
      && TREE_CODE (*type) != ENUMERAL_TYPE)
    {
      error_at (loc, "bit-field %qs has invalid type", name);
      *type = unsigned_type_node;
    }

  if (TYPE_WARN_IF_NOT_ALIGN (*type))
    {
      error_at (loc, "cannot declare bit-field %qs with %<warn_if_not_aligned%> type",
		name);
      *type = unsigned_type_node;
    }

  type_mv = TYPE_MAIN_VARIANT (*type);
  if (!in_system_header_at (input_location)
      && type_mv != integer_type_node
      && type_mv != unsigned_type_node
      && type_mv != boolean_type_node)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "type of bit-field %qs is a GCC extension", name);

  max_width = TYPE_PRECISION (*type);

  if (compare_tree_int (*width, max_width) > 0)
    {
      error_at (loc, "width of %qs exceeds its type", name);
      w = max_width;
      *width = build_int_cst (integer_type_node, w);
    }
  else
    w = tree_to_uhwi (*width);

  if (TREE_CODE (*type) == ENUMERAL_TYPE)
    {
      struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
      if (!lt
	  || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
	  || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
	warning_at (loc, 0, "%qs is narrower than values of its type", name);
    }
}

/* Print warning about variable length array if necessary.  */

static void
warn_variable_length_array (tree name, tree size)
{
  if (TREE_CONSTANT (size))
    {
      if (name)
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids array %qE whose size "
		     "cannot be evaluated", name);
      else
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array "
		     "whose size cannot be evaluated");
    }
  else
    {
      if (name)
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids variable length array %qE", name);
      else
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable "
		     "length array");
    }
}

/* Print warning about defaulting to int if necessary.  */

static void
warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
{
  diagnostic_info diagnostic;
  va_list ap;
  rich_location richloc (line_table, location);

  va_start (ap, gmsgid);
  /* Implicit int is a constraint violation in C99, so the diagnostic
     is a pedwarn there; otherwise just a warning.  */
  diagnostic_set_info (&diagnostic, gmsgid, &ap, &richloc,
                       flag_isoc99 ? DK_PEDWARN : DK_WARNING);
  diagnostic.option_index = opt;
  diagnostic_report_diagnostic (global_dc, &diagnostic);
  va_end (ap);
}

/* Returns the smallest location != UNKNOWN_LOCATION in LOCATIONS,
   considering only those c_declspec_words found in LIST, which
   must be terminated by cdw_number_of_elements.  */

static location_t
smallest_type_quals_location (const location_t *locations,
			      const c_declspec_word *list)
{
  location_t loc = UNKNOWN_LOCATION;
  while (*list != cdw_number_of_elements)
    {
      location_t newloc = locations[*list];
      if (loc == UNKNOWN_LOCATION
	  || (newloc != UNKNOWN_LOCATION && newloc < loc))
	loc = newloc;
      list++;
    }

  return loc;
}

/* Given declspecs and a declarator,
   determine the name and type of the object declared
   and construct a ..._DECL node for it.
   (In one case we can return a ..._TYPE node instead.
    For invalid input we sometimes return NULL_TREE.)

   DECLSPECS is a c_declspecs structure for the declaration specifiers.

   DECL_CONTEXT says which syntactic context this declaration is in:
     NORMAL for most contexts.  Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
     FUNCDEF for a function definition.  Like NORMAL but a few different
      error messages in each case.  Return value may be zero meaning
      this definition is too screwy to try to parse.
     PARM for a parameter declaration (either within a function prototype
      or before a function body).  Make a PARM_DECL, or return void_type_node.
     TYPENAME if for a typename (in a cast or sizeof).
      Don't make a DECL node; just return the ..._TYPE node.
     FIELD for a struct or union field; make a FIELD_DECL.
   INITIALIZED is true if the decl has an initializer.
   WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node
   representing the width of the bit-field.
   DECL_ATTRS points to the list of attributes that should be added to this
   decl.
Any nested attributes that belong on the decl itself will be added to this list. If EXPR is not NULL, any expressions that need to be evaluated as part of evaluating variably modified types will be stored in *EXPR. If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be set to indicate whether operands in *EXPR can be used in constant expressions. DEPRECATED_STATE is a deprecated_states value indicating whether deprecation warnings should be suppressed. In the TYPENAME case, DECLARATOR is really an absolute declarator. It may also be so in the PARM case, for a prototype where the argument type is specified but not the name. This function is where the complicated C meanings of `static' and `extern' are interpreted. */ static tree grokdeclarator (const struct c_declarator *declarator, struct c_declspecs *declspecs, enum decl_context decl_context, bool initialized, tree *width, tree *decl_attrs, tree *expr, bool *expr_const_operands, enum deprecated_states deprecated_state) { tree type = declspecs->type; bool threadp = declspecs->thread_p; enum c_storage_class storage_class = declspecs->storage_class; int constp; int restrictp; int volatilep; int atomicp; int type_quals = TYPE_UNQUALIFIED; tree name = NULL_TREE; bool funcdef_flag = false; bool funcdef_syntax = false; bool size_varies = false; tree decl_attr = declspecs->decl_attr; int array_ptr_quals = TYPE_UNQUALIFIED; tree array_ptr_attrs = NULL_TREE; bool array_parm_static = false; bool array_parm_vla_unspec_p = false; tree returned_attrs = NULL_TREE; tree decl_id_attrs = NULL_TREE; bool bitfield = width != NULL; tree element_type; tree orig_qual_type = NULL; size_t orig_qual_indirect = 0; struct c_arg_info *arg_info = 0; addr_space_t as1, as2, address_space; location_t loc = UNKNOWN_LOCATION; tree expr_dummy; bool expr_const_operands_dummy; enum c_declarator_kind first_non_attr_kind; unsigned int alignas_align = 0; if (TREE_CODE (type) == ERROR_MARK) return error_mark_node; if (expr == NULL) { expr = 
&expr_dummy; expr_dummy = NULL_TREE; } if (expr_const_operands == NULL) expr_const_operands = &expr_const_operands_dummy; if (declspecs->expr) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (declspecs->expr), *expr, declspecs->expr); else *expr = declspecs->expr; } *expr_const_operands = declspecs->expr_const_operands; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; /* Look inside a declarator for the name being declared and get it as an IDENTIFIER_NODE, for an error message. */ { const struct c_declarator *decl = declarator; first_non_attr_kind = cdk_attrs; while (decl) switch (decl->kind) { case cdk_array: loc = decl->id_loc; /* FALL THRU. */ case cdk_function: case cdk_pointer: funcdef_syntax = (decl->kind == cdk_function); if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = decl->declarator; break; case cdk_attrs: decl = decl->declarator; break; case cdk_id: loc = decl->id_loc; if (decl->u.id.id) name = decl->u.id.id; decl_id_attrs = decl->u.id.attrs; if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = 0; break; default: gcc_unreachable (); } if (name == NULL_TREE) { gcc_assert (decl_context == PARM || decl_context == TYPENAME || (decl_context == FIELD && declarator->kind == cdk_id)); gcc_assert (!initialized); } } /* A function definition's declarator must have the form of a function declarator. */ if (funcdef_flag && !funcdef_syntax) return NULL_TREE; /* If this looks like a function definition, make it one, even if it occurs where parms are expected. Then store_parm_decls will reject it and not use it as a parm. 
*/ if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag) decl_context = PARM; if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (declspecs->type, declspecs->decl_attr); if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope && variably_modified_type_p (type, NULL_TREE)) { if (name) error_at (loc, "variably modified %qE at file scope", name); else error_at (loc, "variably modified field at file scope"); type = integer_type_node; } size_varies = C_TYPE_VARIABLE_SIZE (type) != 0; /* Diagnose defaulting to "int". */ if (declspecs->default_int_p && !in_system_header_at (input_location)) { /* Issue a warning if this is an ISO C 99 program or if -Wreturn-type and this is a function, or if -Wimplicit; prefer the former warning since it is more explicit. */ if ((warn_implicit_int || warn_return_type > 0 || flag_isoc99) && funcdef_flag) warn_about_return_type = 1; else { if (name) warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in declaration " "of %qE", name); else warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in type name"); } } /* Adjust the type if a bit-field is being declared, -funsigned-bitfields applied and the type is not explicitly "signed". */ if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p && TREE_CODE (type) == INTEGER_TYPE) type = unsigned_type_for (type); /* Figure out the type qualifiers for the declaration. There are two ways a declaration can become qualified. One is something like `const int i' where the `const' is explicit. Another is something like `typedef const int CI; CI i' where the type of the declaration contains the `const'. 
A third possibility is that there is a type qualifier on the element type of a typedefed array type, in which case we should extract that qualifier so that c_apply_type_quals_to_decl receives the full list of qualifiers to work with (C90 is not entirely clear about whether duplicate qualifiers should be diagnosed in this case, but it seems most appropriate to do so). */ element_type = strip_array_types (type); constp = declspecs->const_p + TYPE_READONLY (element_type); restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type); volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type); atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type); as1 = declspecs->address_space; as2 = TYPE_ADDR_SPACE (element_type); address_space = ADDR_SPACE_GENERIC_P (as1)? as2 : as1; if (constp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>"); if (restrictp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>"); if (volatilep > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>"); if (atomicp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>"); if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2) error_at (loc, "conflicting named address spaces (%s vs %s)", c_addr_space_name (as1), c_addr_space_name (as2)); if ((TREE_CODE (type) == ARRAY_TYPE || first_non_attr_kind == cdk_array) && TYPE_QUALS (element_type)) { orig_qual_type = type; type = TYPE_MAIN_VARIANT (type); } type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0) | (atomicp ? TYPE_QUAL_ATOMIC : 0) | ENCODE_QUAL_ADDR_SPACE (address_space)); if (type_quals != TYPE_QUALS (element_type)) orig_qual_type = NULL_TREE; /* Applying the _Atomic qualifier to an array type (through the use of typedefs or typeof) must be detected here. If the qualifier is introduced later, any appearance of applying it to an array is actually applying it to an element of that array. 
*/ if (declspecs->atomic_p && TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (funcdef_flag && (threadp || storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef)) { if (storage_class == csc_auto) pedwarn (loc, (current_scope == file_scope) ? 0 : OPT_Wpedantic, "function definition declared %<auto%>"); if (storage_class == csc_register) error_at (loc, "function definition declared %<register%>"); if (storage_class == csc_typedef) error_at (loc, "function definition declared %<typedef%>"); if (threadp) error_at (loc, "function definition declared %qs", declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; if (storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef) storage_class = csc_none; } else if (decl_context != NORMAL && (storage_class != csc_none || threadp)) { if (decl_context == PARM && storage_class == csc_register) ; else { switch (decl_context) { case FIELD: if (name) error_at (loc, "storage class specified for structure " "field %qE", name); else error_at (loc, "storage class specified for structure field"); break; case PARM: if (name) error_at (loc, "storage class specified for parameter %qE", name); else error_at (loc, "storage class specified for unnamed parameter"); break; default: error_at (loc, "storage class specified for typename"); break; } storage_class = csc_none; threadp = false; } } else if (storage_class == csc_extern && initialized && !funcdef_flag) { /* 'extern' with initialization is invalid if not at file scope. */ if (current_scope == file_scope) { /* It is fine to have 'extern const' when compiling at C and C++ intersection. 
*/ if (!(warn_cxx_compat && constp)) warning_at (loc, 0, "%qE initialized and declared %<extern%>", name); } else error_at (loc, "%qE has both %<extern%> and initializer", name); } else if (current_scope == file_scope) { if (storage_class == csc_auto) error_at (loc, "file-scope declaration of %qE specifies %<auto%>", name); if (pedantic && storage_class == csc_register) pedwarn (input_location, OPT_Wpedantic, "file-scope declaration of %qE specifies %<register%>", name); } else { if (storage_class == csc_extern && funcdef_flag) error_at (loc, "nested function %qE declared %<extern%>", name); else if (threadp && storage_class == csc_none) { error_at (loc, "function-scope %qE implicitly auto and declared " "%qs", name, declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; } } /* Now figure out the structure of the declarator proper. Descend through it, creating more complex types, until we reach the declared identifier (or NULL_TREE, in an absolute declarator). At each stage we maintain an unqualified version of the type together with any qualifiers that should be applied to it with c_build_qualified_type; this way, array types including multidimensional array types are first built up in unqualified form and then the qualified form is created with TYPE_MAIN_VARIANT pointing to the unqualified form. */ while (declarator && declarator->kind != cdk_id) { if (type == error_mark_node) { declarator = declarator->declarator; continue; } /* Each level of DECLARATOR is either a cdk_array (for ...[..]), a cdk_pointer (for *...), a cdk_function (for ...(...)), a cdk_attrs (for nested attributes), or a cdk_id (for the name being declared or the place in an absolute declarator where the name was omitted). For the last case, we have just exited the loop. At this point, TYPE is the type of elements of an array, or for a function to return, or for a pointer to point to. 
After this sequence of ifs, TYPE is the type of the array or function or pointer, and DECLARATOR has had its outermost layer removed. */ if (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static) { /* Only the innermost declarator (making a parameter be of array type which is converted to pointer type) may have static or type qualifiers. */ error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = false; } switch (declarator->kind) { case cdk_attrs: { /* A declarator with embedded attributes. */ tree attrs = declarator->u.attrs; const struct c_declarator *inner_decl; int attr_flags = 0; declarator = declarator->declarator; /* Standard attribute syntax precisely defines what entity an attribute in each position appertains to, so only apply laxity about positioning to GNU attribute syntax. Standard attributes applied to a function or array declarator apply exactly to that type; standard attributes applied to the identifier apply to the declaration rather than to the type, and are specified using a cdk_id declarator rather than using cdk_attrs. */ inner_decl = declarator; while (inner_decl->kind == cdk_attrs) inner_decl = inner_decl->declarator; if (!cxx11_attribute_p (attrs)) { if (inner_decl->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; else if (inner_decl->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; else if (inner_decl->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; } attrs = c_warn_type_attributes (attrs); returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); break; } case cdk_array: { tree itype = NULL_TREE; tree size = declarator->u.array.dimen; /* The index is a signed object `sizetype' bits wide. 
*/ tree index_type = c_common_signed_type (sizetype); array_ptr_quals = declarator->u.array.quals; array_ptr_attrs = declarator->u.array.attrs; array_parm_static = declarator->u.array.static_p; array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p; declarator = declarator->declarator; /* Check for some types that there cannot be arrays of. */ if (VOID_TYPE_P (type)) { if (name) error_at (loc, "declaration of %qE as array of voids", name); else error_at (loc, "declaration of type name as array of voids"); type = error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "declaration of %qE as array of functions", name); else error_at (loc, "declaration of type name as array of " "functions"); type = error_mark_node; } if (pedantic && !in_system_header_at (input_location) && flexible_array_type_p (type)) pedwarn (loc, OPT_Wpedantic, "invalid use of structure with flexible array member"); if (size == error_mark_node) type = error_mark_node; if (type == error_mark_node) continue; if (!verify_type_context (loc, TCTX_ARRAY_ELEMENT, type)) { type = error_mark_node; continue; } /* If size was specified, set ITYPE to a range-type for that size. Otherwise, ITYPE remains null. finish_decl may figure it out from an initial value. */ if (size) { bool size_maybe_const = true; bool size_int_const = (TREE_CODE (size) == INTEGER_CST && !TREE_OVERFLOW (size)); bool this_size_varies = false; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (size); if (!INTEGRAL_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has non-integer type", name); else error_at (loc, "size of unnamed array has non-integer type"); size = integer_one_node; size_int_const = true; } /* This can happen with enum forward declaration. 
*/ else if (!COMPLETE_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has incomplete type", name); else error_at (loc, "size of unnamed array has incomplete " "type"); size = integer_one_node; size_int_const = true; } size = c_fully_fold (size, false, &size_maybe_const); if (pedantic && size_maybe_const && integer_zerop (size)) { if (name) pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array %qE", name); else pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array"); } if (TREE_CODE (size) == INTEGER_CST && size_maybe_const) { constant_expression_warning (size); if (tree_int_cst_sgn (size) < 0) { if (name) error_at (loc, "size of array %qE is negative", name); else error_at (loc, "size of unnamed array is negative"); size = integer_one_node; size_int_const = true; } /* Handle a size folded to an integer constant but not an integer constant expression. */ if (!size_int_const) { /* If this is a file scope declaration of an ordinary identifier, this is invalid code; diagnosing it here and not subsequently treating the type as variable-length avoids more confusing diagnostics later. */ if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) pedwarn (input_location, 0, "variably modified %qE at file scope", name); else this_size_varies = size_varies = true; warn_variable_length_array (name, size); } } else if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) { error_at (loc, "variably modified %qE at file scope", name); size = integer_one_node; } else { /* Make sure the array size remains visibly nonconstant even if it is (eg) a const variable with known value. */ this_size_varies = size_varies = true; warn_variable_length_array (name, size); if (sanitize_flags_p (SANITIZE_VLA) && current_function_decl != NULL_TREE && decl_context == NORMAL) { /* Evaluate the array size only once. 
*/ size = save_expr (size); size = c_fully_fold (size, false, NULL); size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size), ubsan_instrument_vla (loc, size), size); } } if (integer_zerop (size) && !this_size_varies) { /* A zero-length array cannot be represented with an unsigned index type, which is what we'll get with build_index_type. Create an open-ended range instead. */ itype = build_range_type (sizetype, size, NULL_TREE); } else { /* Arrange for the SAVE_EXPR on the inside of the MINUS_EXPR, which allows the -1 to get folded with the +1 that happens when building TYPE_SIZE. */ if (size_varies) size = save_expr (size); if (this_size_varies && TREE_CODE (size) == INTEGER_CST) size = build2 (COMPOUND_EXPR, TREE_TYPE (size), integer_zero_node, size); /* Compute the maximum valid index, that is, size - 1. Do the calculation in index_type, so that if it is a variable the computations will be done in the proper mode. */ itype = fold_build2_loc (loc, MINUS_EXPR, index_type, convert (index_type, size), convert (index_type, size_one_node)); /* The above overflows when size does not fit in index_type. ??? While a size of INT_MAX+1 technically shouldn't cause an overflow (because we subtract 1), handling this case seems like an unnecessary complication. 
*/ if (TREE_CODE (size) == INTEGER_CST && !int_fits_type_p (size, index_type)) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); type = error_mark_node; continue; } itype = build_index_type (itype); } if (this_size_varies) { if (TREE_SIDE_EFFECTS (size)) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (size), *expr, size); else *expr = size; } *expr_const_operands &= size_maybe_const; } } else if (decl_context == FIELD) { bool flexible_array_member = false; if (array_parm_vla_unspec_p) /* Field names can in fact have function prototype scope so [*] is disallowed here through making the field variably modified, not through being something other than a declaration with function prototype scope. */ size_varies = true; else { const struct c_declarator *t = declarator; while (t->kind == cdk_attrs) t = t->declarator; flexible_array_member = (t->kind == cdk_id); } if (flexible_array_member && !in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); /* ISO C99 Flexible array members are effectively identical to GCC's zero-length array extension. */ if (flexible_array_member || array_parm_vla_unspec_p) itype = build_range_type (sizetype, size_zero_node, NULL_TREE); } else if (decl_context == PARM) { if (array_parm_vla_unspec_p) { itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } else if (decl_context == TYPENAME) { if (array_parm_vla_unspec_p) { /* C99 6.7.5.2p4 */ warning (0, "%<[*]%> not in a declaration"); /* We use this to avoid messing up with incomplete array types of the same type, that would otherwise be modified below. */ itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } /* Complain about arrays of incomplete types. 
*/ if (!COMPLETE_TYPE_P (type)) { error_at (loc, "array type has incomplete element type %qT", type); /* See if we can be more helpful. */ if (TREE_CODE (type) == ARRAY_TYPE) { if (name) inform (loc, "declaration of %qE as multidimensional " "array must have bounds for all dimensions " "except the first", name); else inform (loc, "declaration of multidimensional array " "must have bounds for all dimensions except " "the first"); } type = error_mark_node; } else /* When itype is NULL, a shared incomplete array type is returned for all array of a given type. Elsewhere we make sure we don't complete that type before copying it, but here we want to make sure we don't ever modify the shared type, so we gcc_assert (itype) below. */ { addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type)) type = build_qualified_type (type, ENCODE_QUAL_ADDR_SPACE (as)); type = build_array_type (type, itype); } if (type != error_mark_node) { if (size_varies) { /* It is ok to modify type here even if itype is NULL: if size_varies, we're in a multi-dimensional array and the inner type has variable size, so the enclosing shared array type must too. */ if (size && TREE_CODE (size) == INTEGER_CST) type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); C_TYPE_VARIABLE_SIZE (type) = 1; } /* The GCC extension for zero-length arrays differs from ISO flexible array members in that sizeof yields zero. */ if (size && integer_zerop (size)) { gcc_assert (itype); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } if (array_parm_vla_unspec_p) { gcc_assert (itype); /* The type is complete. 
C99 6.7.5.2p4 */ type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } if (!valid_array_size_p (loc, type, name)) type = error_mark_node; } if (decl_context != PARM && (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static)) { error_at (loc, "static or type qualifiers in non-parameter " "array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = false; } orig_qual_indirect++; break; } case cdk_function: { /* Say it's a definition only for the declarator closest to the identifier, apart possibly from some attributes. */ bool really_funcdef = false; tree arg_types; orig_qual_type = NULL_TREE; if (funcdef_flag) { const struct c_declarator *t = declarator->declarator; while (t->kind == cdk_attrs) t = t->declarator; really_funcdef = (t->kind == cdk_id); } /* Declaring a function type. Make sure we have a valid type for the function to return. */ if (type == error_mark_node) continue; size_varies = false; /* Warn about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "%qE declared as function returning a " "function", name); else error_at (loc, "type name declared as function " "returning a function"); type = integer_type_node; } if (TREE_CODE (type) == ARRAY_TYPE) { if (name) error_at (loc, "%qE declared as function returning an array", name); else error_at (loc, "type name declared as function returning " "an array"); type = integer_type_node; } /* Construct the function type and go to the next inner layer of declarator. */ arg_info = declarator->u.arg_info; arg_types = grokparms (arg_info, really_funcdef); /* Type qualifiers before the return type of the function qualify the return type, not the function type. 
*/ if (type_quals) { const enum c_declspec_word ignored_quals_list[] = { cdw_const, cdw_volatile, cdw_restrict, cdw_address_space, cdw_atomic, cdw_number_of_elements }; location_t specs_loc = smallest_type_quals_location (declspecs->locations, ignored_quals_list); if (specs_loc == UNKNOWN_LOCATION) specs_loc = declspecs->locations[cdw_typedef]; if (specs_loc == UNKNOWN_LOCATION) specs_loc = loc; /* Type qualifiers on a function return type are normally permitted by the standard but have no effect, so give a warning at -Wreturn-type. Qualifiers on a void return type are banned on function definitions in ISO C; GCC used to used them for noreturn functions. The resolution of C11 DR#423 means qualifiers (other than _Atomic) are actually removed from the return type when determining the function type. */ int quals_used = type_quals; if (flag_isoc11) quals_used &= TYPE_QUAL_ATOMIC; if (quals_used && VOID_TYPE_P (type) && really_funcdef) pedwarn (specs_loc, 0, "function definition has qualified void " "return type"); else warning_at (specs_loc, OPT_Wignored_qualifiers, "type qualifiers ignored on function " "return type"); /* Ensure an error for restrict on invalid types; the DR#423 resolution is not entirely clear about this. */ if (flag_isoc11 && (type_quals & TYPE_QUAL_RESTRICT) && (!POINTER_TYPE_P (type) || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type)))) error_at (loc, "invalid use of %<restrict%>"); type = c_build_qualified_type (type, quals_used); } type_quals = TYPE_UNQUALIFIED; type = build_function_type (type, arg_types); declarator = declarator->declarator; /* Set the TYPE_CONTEXTs for each tagged type which is local to the formal parameter list of this FUNCTION_TYPE to point to the FUNCTION_TYPE node itself. */ { c_arg_tag *tag; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) TYPE_CONTEXT (tag->type) = type; } break; } case cdk_pointer: { /* Merge any constancy or volatility into the target type for the pointer. 
*/ if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); orig_qual_type = NULL_TREE; size_varies = false; /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of it's own, but special attention is required if the type is anonymous. We attach an artificial TYPE_DECL to such pointed-to type and arrange for it to be included in a DECL_EXPR. This forces the sizes evaluation at a safe point and ensures it is not deferred until e.g. within a deeper conditional context. PARM contexts have no enclosing statement list that can hold the DECL_EXPR, so we need to use a BIND_EXPR instead, and add it to the list of expressions that need to be evaluated. TYPENAME contexts do have an enclosing statement list, but it would be incorrect to use it, as the size should only be evaluated if the containing expression is evaluated. We might also be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the fake TYPE_DECL in the enclosing context would force the size evaluation prior to the side effects. We therefore use BIND_EXPRs in TYPENAME contexts too. 
*/ if (!TYPE_NAME (type) && variably_modified_type_p (type, NULL_TREE)) { tree bind = NULL_TREE; if (decl_context == TYPENAME || decl_context == PARM) { bind = build3 (BIND_EXPR, void_type_node, NULL_TREE, NULL_TREE, NULL_TREE); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = push_stmt_list (); push_scope (); } tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl) = 1; pushdecl (decl); finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE); TYPE_NAME (type) = decl; if (bind) { pop_scope (); BIND_EXPR_BODY (bind) = pop_stmt_list (BIND_EXPR_BODY (bind)); if (*expr) *expr = build2 (COMPOUND_EXPR, void_type_node, *expr, bind); else *expr = bind; } } type = c_build_pointer_type (type); /* Process type qualifiers (such as const or volatile) that were given inside the `*'. */ type_quals = declarator->u.pointer_quals; declarator = declarator->declarator; break; } default: gcc_unreachable (); } } *decl_attrs = chainon (returned_attrs, *decl_attrs); *decl_attrs = chainon (decl_id_attrs, *decl_attrs); /* Now TYPE has the actual type, apart from any qualifiers in TYPE_QUALS. */ /* Warn about address space used for things other than static memory or pointers. 
*/ address_space = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (address_space)) { if (decl_context == NORMAL) { switch (storage_class) { case csc_auto: error ("%qs combined with %<auto%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_register: error ("%qs combined with %<register%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_none: if (current_function_scope) { error ("%qs specified for auto variable %qE", c_addr_space_name (address_space), name); break; } break; case csc_static: case csc_extern: case csc_typedef: break; default: gcc_unreachable (); } } else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE) { if (name) error ("%qs specified for parameter %qE", c_addr_space_name (address_space), name); else error ("%qs specified for unnamed parameter", c_addr_space_name (address_space)); } else if (decl_context == FIELD) { if (name) error ("%qs specified for structure field %qE", c_addr_space_name (address_space), name); else error ("%qs specified for structure field", c_addr_space_name (address_space)); } } /* Check the type and width of a bit-field. */ if (bitfield) { check_bitfield_type_and_width (loc, &type, width, name); /* C11 makes it implementation-defined (6.7.2.1#5) whether atomic types are permitted for bit-fields; we have no code to make bit-field accesses atomic, so disallow them. */ if (type_quals & TYPE_QUAL_ATOMIC) { if (name) error_at (loc, "bit-field %qE has atomic type", name); else error_at (loc, "bit-field has atomic type"); type_quals &= ~TYPE_QUAL_ATOMIC; } } /* Reject invalid uses of _Alignas. 
*/ if (declspecs->alignas_p) { if (storage_class == csc_typedef) error_at (loc, "alignment specified for typedef %qE", name); else if (storage_class == csc_register) error_at (loc, "alignment specified for %<register%> object %qE", name); else if (decl_context == PARM) { if (name) error_at (loc, "alignment specified for parameter %qE", name); else error_at (loc, "alignment specified for unnamed parameter"); } else if (bitfield) { if (name) error_at (loc, "alignment specified for bit-field %qE", name); else error_at (loc, "alignment specified for unnamed bit-field"); } else if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "alignment specified for function %qE", name); else if (declspecs->align_log != -1 && TYPE_P (type)) { alignas_align = 1U << declspecs->align_log; if (alignas_align < min_align_of_type (type)) { if (name) error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of %qE", name); else error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of unnamed field"); alignas_align = 0; } } } /* If this is declaring a typedef name, return a TYPE_DECL. 
*/ if (storage_class == csc_typedef) { tree decl; if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, TYPE_DECL, declarator->u.id.id, type); if (declspecs->explicit_signed_p) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0,"typedef %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0,"typedef %q+D declared %<_Noreturn%>", decl); if (warn_cxx_compat && declarator->u.id.id != NULL_TREE) { struct c_binding *b = I_TAG_BINDING (declarator->u.id.id); if (b != NULL && b->decl != NULL_TREE && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type)) { auto_diagnostic_group d; if (warning_at (declarator->id_loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), decl) && b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } return decl; } /* If this is a type name (such as, in a cast or sizeof), compute the type and return it now. */ if (decl_context == TYPENAME) { /* Note that the grammar rejects storage classes in typenames and fields. 
*/ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids const or volatile function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); return type; } if (pedantic && decl_context == FIELD && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.2.1p8 */ pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot " "have a variably modified type"); } /* Aside from typedefs and type names (handle above), `void' at top level (not within pointer) is allowed only in public variables. We don't complain about parms either, but that is because a better error message can be made later. */ if (VOID_TYPE_P (type) && decl_context != PARM && !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE) && (storage_class == csc_extern || (current_scope == file_scope && !(storage_class == csc_static || storage_class == csc_register))))) { error_at (loc, "variable or field %qE declared void", name); type = integer_type_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ { tree decl; if (decl_context == PARM) { tree promoted_type; bool array_parameter_p = false; /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. 
*/ type = TREE_TYPE (type); if (orig_qual_type != NULL_TREE) { if (orig_qual_indirect == 0) orig_qual_type = TREE_TYPE (orig_qual_type); else orig_qual_indirect--; } if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); type = c_build_pointer_type (type); type_quals = array_ptr_quals; if (type_quals) type = c_build_qualified_type (type, type_quals); /* We don't yet implement attributes in this context. */ if (array_ptr_attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "attributes in parameter array declarator ignored"); size_varies = false; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type = c_build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; } else if (type_quals) type = c_build_qualified_type (type, type_quals); decl = build_decl (declarator->id_loc, PARM_DECL, declarator->u.id.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; C_ARRAY_PARAMETER (decl) = array_parameter_p; /* Compute the type actually passed in the parmlist, for the case where there is no prototype. (For example, shorts and chars are passed as ints.) When there is a prototype, this is overridden later. */ if (type == error_mark_node) promoted_type = type; else promoted_type = c_type_promotes_to (type); DECL_ARG_TYPE (decl) = promoted_type; if (declspecs->inline_p) pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl); } else if (decl_context == FIELD) { /* Note that the grammar rejects storage classes in typenames and fields. 
*/ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); /* Structure field. It may not be a function. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "field %qE declared as a function", name); type = build_pointer_type (type); } else if (TREE_CODE (type) != ERROR_MARK && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type)) { if (name) error_at (loc, "field %qE has incomplete type", name); else error_at (loc, "unnamed field has incomplete type"); type = error_mark_node; } else if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { /* We have a flexible array member through a typedef. Set suitable range. Whether this is a correct position for a flexible array member will be determined elsewhere. */ if (!in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node, NULL_TREE); if (orig_qual_indirect == 0) orig_qual_type = NULL_TREE; } if (type != error_mark_node && !verify_type_context (loc, TCTX_FIELD, type)) type = error_mark_node; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, FIELD_DECL, declarator->u.id.id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !declarator->u.id.id) { TREE_NO_WARNING (decl) = 1; DECL_PADDING_P (decl) = 1; } if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (storage_class == csc_register || threadp) { error_at (loc, "invalid storage class for function %qE", name); } else if (current_scope != file_scope) { /* Function declaration not at file scope. Storage classes other than `extern' are not allowed, C99 6.7.1p5, and `extern' makes no difference. However, GCC allows 'auto', perhaps with 'inline', to support nested functions. 
*/ if (storage_class == csc_auto) pedwarn (loc, OPT_Wpedantic, "invalid storage class for function %qE", name); else if (storage_class == csc_static) { error_at (loc, "invalid storage class for function %qE", name); if (funcdef_flag) storage_class = declspecs->storage_class = csc_none; else return NULL_TREE; } } decl = build_decl (declarator->id_loc, FUNCTION_DECL, declarator->u.id.id, type); decl = build_decl_attribute_variant (decl, decl_attr); if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); /* Every function declaration is an external reference (DECL_EXTERNAL) except for those which are not at file scope and are explicitly declared "auto". This is forbidden by standard C (C99 6.7.1p5) and is interpreted by GCC to signify a forward declaration of a nested function. */ if (storage_class == csc_auto && current_scope != file_scope) DECL_EXTERNAL (decl) = 0; /* In C99, a function which is declared 'inline' with 'extern' is not an external reference (which is confusing). It means that the later definition of the function must be output in this file, C99 6.7.4p6. In GNU C89, a function declared 'extern inline' is an external reference. */ else if (declspecs->inline_p && storage_class != csc_static) DECL_EXTERNAL (decl) = ((storage_class == csc_extern) == flag_gnu89_inline); else DECL_EXTERNAL (decl) = !initialized; /* Record absence of global scope for `static' or `auto'. */ TREE_PUBLIC (decl) = !(storage_class == csc_static || storage_class == csc_auto); /* For a function definition, record the argument information block where store_parm_decls will look for it. */ if (funcdef_flag) current_function_arg_info = arg_info; if (declspecs->default_int_p) C_FUNCTION_IMPLICIT_INT (decl) = 1; /* Record presence of `inline' and `_Noreturn', if it is reasonable. 
*/ if (flag_hosted && MAIN_NAME_P (declarator->u.id.id)) { if (declspecs->inline_p) pedwarn (loc, 0, "cannot inline function %<main%>"); if (declspecs->noreturn_p) pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>"); } else { if (declspecs->inline_p) /* Record that the function is declared `inline'. */ DECL_DECLARED_INLINE_P (decl) = 1; if (declspecs->noreturn_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Noreturn%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Noreturn%>"); TREE_THIS_VOLATILE (decl) = 1; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ int extern_ref = !initialized && storage_class == csc_extern; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); /* C99 6.2.2p7: It is invalid (compile-time undefined behavior) to create an 'extern' declaration for a variable if there is a global declaration that is 'static' and the global declaration is not visible. (If the static declaration _is_ currently visible, the 'extern' declaration is taken to refer to that decl.) */ if (extern_ref && current_scope != file_scope) { tree global_decl = identifier_global_value (declarator->u.id.id); tree visible_decl = lookup_name (declarator->u.id.id); if (global_decl && global_decl != visible_decl && VAR_P (global_decl) && !TREE_PUBLIC (global_decl)) error_at (loc, "variable previously declared %<static%> " "redeclared %<extern%>"); } decl = build_decl (declarator->id_loc, VAR_DECL, declarator->u.id.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl); /* At file scope, an initialized extern declaration may follow a static declaration. In that case, DECL_EXTERNAL will be reset later in start_decl. 
*/ DECL_EXTERNAL (decl) = (storage_class == csc_extern); /* At file scope, the presence of a `static' or `register' storage class specifier, or the absence of all storage class specifiers makes this declaration a definition (perhaps tentative). Also, the absence of `static' makes it public. */ if (current_scope == file_scope) { TREE_PUBLIC (decl) = storage_class != csc_static; TREE_STATIC (decl) = !extern_ref; } /* Not at file scope, only `static' makes a static definition. */ else { TREE_STATIC (decl) = (storage_class == csc_static); TREE_PUBLIC (decl) = extern_ref; } if (threadp) set_decl_tls_model (decl, decl_default_tls_model (decl)); } if ((storage_class == csc_extern || (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE && !funcdef_flag)) && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.5.2p2 */ if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "non-nested function with variably modified type"); else error_at (loc, "object with variably modified type must have " "no linkage"); } /* For nested functions disqualify ones taking VLAs by value from inlining since the middle-end cannot deal with this. ??? We should arrange for those to be passed by reference with emitting the copy on the caller side in the frontend. */ if (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE) for (tree al = TYPE_ARG_TYPES (type); al; al = TREE_CHAIN (al)) { tree arg = TREE_VALUE (al); if (arg != error_mark_node && C_TYPE_VARIABLE_SIZE (arg)) { DECL_UNINLINABLE (decl) = 1; break; } } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == csc_register) { C_DECL_REGISTER (decl) = 1; DECL_REGISTER (decl) = 1; } /* Record constancy and volatility. */ c_apply_type_quals_to_decl (type_quals, decl); /* Apply _Alignas specifiers. 
*/ if (alignas_align) { SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT); DECL_USER_ALIGN (decl) = 1; } /* If a type has volatile components, it should be stored in memory. Otherwise, the fact that those components are volatile will be ignored, and would even crash the compiler. Of course, this only makes sense on VAR,PARM, and RESULT decl's. */ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)) && (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL)) { /* It is not an error for a structure with volatile fields to be declared register, but reset DECL_REGISTER since it cannot actually go in a register. */ int was_reg = C_DECL_REGISTER (decl); C_DECL_REGISTER (decl) = 0; DECL_REGISTER (decl) = 0; c_mark_addressable (decl); C_DECL_REGISTER (decl) = was_reg; } /* This is the earliest point at which we might know the assembler name of a variable. Thus, if it's known before this, die horribly. */ gcc_assert (!HAS_DECL_ASSEMBLER_NAME_P (decl) || !DECL_ASSEMBLER_NAME_SET_P (decl)); if (warn_cxx_compat && VAR_P (decl) && TREE_PUBLIC (decl) && TREE_STATIC (decl) && (RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl)) || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) && TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, ("non-local variable %qD with anonymous type is " "questionable in C++"), decl); return decl; } } /* Decode the parameter-list info for a function type or function definition. The argument is the value returned by `get_parm_info' (or made in c-parse.c if there is an identifier list instead of a parameter decl list). These two functions are separate because when a function returns or receives functions then each is called multiple times but the order of calls is different. The last call to `grokparms' is always the one that contains the formal parameter names of a function definition. Return a list of arg types to use in the FUNCTION_TYPE for this function. 
   FUNCDEF_FLAG is true for a function definition, false for a mere
   declaration.  A nonempty identifier-list gets an error message when
   FUNCDEF_FLAG is false.  */

static tree
grokparms (struct c_arg_info *arg_info, bool funcdef_flag)
{
  tree arg_types = arg_info->types;

  if (funcdef_flag && arg_info->had_vla_unspec)
    {
      /* A function definition isn't function prototype scope C99 6.2.1p4.  */
      /* C99 6.7.5.2p4 */
      error ("%<[*]%> not allowed in other than function prototype scope");
    }

  if (arg_types == NULL_TREE && !funcdef_flag
      && !in_system_header_at (input_location))
    warning (OPT_Wstrict_prototypes,
	     "function declaration isn%'t a prototype");

  if (arg_types == error_mark_node)
    /* Don't set TYPE_ARG_TYPES in this case.  */
    return NULL_TREE;

  else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE)
    {
      /* An identifier list: an old-style (K&R) parameter declaration.
	 Valid only on a definition; diagnose it on a declaration.  */
      if (!funcdef_flag)
	{
	  pedwarn (input_location, 0, "parameter names (without types) in "
		   "function declaration");
	  arg_info->parms = NULL_TREE;
	}
      else
	arg_info->parms = arg_info->types;

      arg_info->types = NULL_TREE;
      return NULL_TREE;
    }
  else
    {
      tree parm, type, typelt;
      unsigned int parmno;

      /* In C2X, convert () in a function definition to (void).  */
      if (flag_isoc2x
	  && funcdef_flag
	  && !arg_types
	  && !arg_info->parms)
	arg_types = arg_info->types = void_list_node;

      /* If there is a parameter of incomplete type in a definition,
	 this is an error.  In a declaration this is valid, and a
	 struct or union type may be completed later, before any calls
	 or definition of the function.  In the case where the tag was
	 first declared within the parameter list, a warning has
	 already been given.  If a parameter has void type, then
	 however the function cannot be defined or called, so
	 warn.  */

      for (parm = arg_info->parms, typelt = arg_types, parmno = 1;
	   parm;
	   parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++)
	{
	  type = TREE_VALUE (typelt);
	  if (type == error_mark_node)
	    continue;

	  if (!COMPLETE_TYPE_P (type))
	    {
	      if (funcdef_flag)
		{
		  if (DECL_NAME (parm))
		    error_at (input_location,
			      "parameter %u (%q+D) has incomplete type",
			      parmno, parm);
		  else
		    error_at (DECL_SOURCE_LOCATION (parm),
			      "parameter %u has incomplete type",
			      parmno);

		  /* Poison the parameter and drop the whole argument
		     type list so no prototype is recorded.  */
		  TREE_VALUE (typelt) = error_mark_node;
		  TREE_TYPE (parm) = error_mark_node;
		  arg_types = NULL_TREE;
		}
	      else if (VOID_TYPE_P (type))
		{
		  if (DECL_NAME (parm))
		    warning_at (input_location, 0,
				"parameter %u (%q+D) has void type",
				parmno, parm);
		  else
		    warning_at (DECL_SOURCE_LOCATION (parm), 0,
				"parameter %u has void type", parmno);
		}
	    }

	  if (DECL_NAME (parm) && TREE_USED (parm))
	    warn_if_shadowing (parm);
	}
      return arg_types;
    }
}

/* Allocate and initialize a c_arg_info structure from the parser's
   obstack.  All fields start out empty.  */

struct c_arg_info *
build_arg_info (void)
{
  struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
  ret->parms = NULL_TREE;
  ret->tags = NULL;
  ret->types = NULL_TREE;
  ret->others = NULL_TREE;
  ret->pending_sizes = NULL;
  ret->had_vla_unspec = 0;
  return ret;
}

/* Take apart the current scope and return a c_arg_info structure with
   info on a parameter list just parsed.  This structure is later fed
   to 'grokparms' and 'store_parm_decls'.

   ELLIPSIS being true means the argument list ended in '...' so don't
   append a sentinel (void_list_node) to the end of the type-list.

   EXPR is NULL or an expression that needs to be evaluated for the
   side effects of array size expressions in the parameters.
*/

struct c_arg_info *
get_parm_info (bool ellipsis, tree expr)
{
  struct c_binding *b = current_scope->bindings;
  struct c_arg_info *arg_info = build_arg_info ();

  tree parms = NULL_TREE;
  vec<c_arg_tag, va_gc> *tags = NULL;
  tree types = NULL_TREE;
  tree others = NULL_TREE;

  bool gave_void_only_once_err = false;

  arg_info->had_vla_unspec = current_scope->had_vla_unspec;

  /* The bindings in this scope must not get put into a block.
     We will take care of deleting the binding nodes.  */
  current_scope->bindings = 0;

  /* This function is only called if there was *something* on the
     parameter list.  */
  gcc_assert (b);

  /* A parameter list consisting solely of 'void' indicates that the
     function takes no arguments.  But if the 'void' is qualified (by
     'const' or 'volatile'), or has a storage class specifier
     ('register'), then the behavior is undefined; issue an error.
     Typedefs for 'void' are OK (see DR#157).  */
  if (b->prev == 0			    /* one binding */
      && TREE_CODE (b->decl) == PARM_DECL   /* which is a parameter */
      && !DECL_NAME (b->decl)		    /* anonymous */
      && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */
    {
      if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED
	  || C_DECL_REGISTER (b->decl))
	error_at (b->locus, "%<void%> as only parameter may not be qualified");

      /* There cannot be an ellipsis.  */
      if (ellipsis)
	error_at (b->locus, "%<void%> must be the only parameter");

      arg_info->types = void_list_node;
      return arg_info;
    }

  /* Without '...', terminate the type list with the void_list_node
     sentinel (see the function comment above).  */
  if (!ellipsis)
    types = void_list_node;

  /* Break up the bindings list into parms, tags, types, and others;
     apply sanity checks; purge the name-to-decl bindings.  */
  while (b)
    {
      tree decl = b->decl;
      tree type = TREE_TYPE (decl);
      c_arg_tag tag;
      const char *keyword;

      switch (TREE_CODE (decl))
	{
	case PARM_DECL:
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }

	  /* Check for forward decls that never got their actual decl.  */
	  if (TREE_ASM_WRITTEN (decl))
	    error_at (b->locus,
		      "parameter %q+D has just a forward declaration", decl);
	  /* Check for (..., void, ...) and issue an error.  */
	  else if (VOID_TYPE_P (type) && !DECL_NAME (decl))
	    {
	      if (!gave_void_only_once_err)
		{
		  error_at (b->locus, "%<void%> must be the only parameter");
		  gave_void_only_once_err = true;
		}
	    }
	  else
	    {
	      /* Valid parameter, add it to the list.  */
	      DECL_CHAIN (decl) = parms;
	      parms = decl;

	      /* Since there is a prototype, args are passed in their
		 declared types.  The back end may override this later.  */
	      DECL_ARG_TYPE (decl) = type;
	      types = tree_cons (0, type, types);
	    }
	  break;

	case ENUMERAL_TYPE: keyword = "enum"; goto tag;
	case UNION_TYPE:    keyword = "union"; goto tag;
	case RECORD_TYPE:   keyword = "struct"; goto tag;
	tag:
	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }

	  /* Warn about any struct, union or enum tags defined in a
	     parameter list.  The scope of such types is limited to
	     the parameter list, which is rarely if ever desirable
	     (it's impossible to call such a function with type-
	     correct arguments).  An anonymous union parm type is
	     meaningful as a GNU extension, so don't warn for that.  */
	  if (TREE_CODE (decl) != UNION_TYPE || b->id != NULL_TREE)
	    {
	      if (b->id)
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "%<%s %E%> declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword, b->id);
	      else
		/* The %s will be one of 'struct', 'union', or 'enum'.  */
		warning_at (b->locus, 0,
			    "anonymous %s declared inside parameter list"
			    " will not be visible outside of this definition or"
			    " declaration", keyword);
	    }

	  tag.id = b->id;
	  tag.type = decl;
	  vec_safe_push (tags, tag);
	  break;

	case FUNCTION_DECL:
	  /* FUNCTION_DECLs appear when there is an implicit function
	     declaration in the parameter list.  */
	  gcc_assert (b->nested || seen_error ());
	  goto set_shadowed;

	case CONST_DECL:
	case TYPE_DECL:
	  /* CONST_DECLs appear here when we have an embedded enum,
	     and TYPE_DECLs appear here when we have an embedded struct
	     or union.  No warnings for this - we already warned about
	     the type itself.  */

	  /* When we reinsert this decl in the function body, we need
	     to reconstruct whether it was marked as nested.  */
	  gcc_assert (!b->nested);
	  DECL_CHAIN (decl) = others;
	  others = decl;
	  /* fall through */

	case ERROR_MARK:
	set_shadowed:
	  /* error_mark_node appears here when we have an undeclared
	     variable.  Just throw it away.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }
	  break;

	  /* Other things that might be encountered.  */
	case LABEL_DECL:
	case VAR_DECL:
	default:
	  gcc_unreachable ();
	}

      b = free_binding_and_advance (b);
    }

  arg_info->parms = parms;
  arg_info->tags = tags;
  arg_info->types = types;
  arg_info->others = others;
  arg_info->pending_sizes = expr;
  return arg_info;
}

/* Get the struct, enum or union (CODE says which) with tag NAME.
   Define the tag as a forward-reference with location LOC if it is
   not defined.  HAVE_STD_ATTRS says whether any standard attributes
   were present after the struct, union or enum keyword; ATTRS are the
   standard attributes present there.  Return a c_typespec structure
   for the type specifier.  */

struct c_typespec
parser_xref_tag (location_t loc, enum tree_code code, tree name,
		 bool have_std_attrs, tree attrs)
{
  struct c_typespec ret;
  tree ref;
  location_t refloc;

  ret.expr = NULL_TREE;
  ret.expr_const_operands = true;

  /* If a cross reference is requested, look up the type
     already defined for this tag and return it.  */

  ref = lookup_tag (code, name, false, &refloc);
  /* If this is the right type of tag, return what we found.
     (This reference will be shadowed by shadow_tag later if appropriate.)
     If this is the wrong type of tag, do not return it.  If it was the
     wrong type in the same scope, we will have had an error message
     already; if in a different scope and declaring a name,
     pending_xref_error will give an error message; but if in a
     different scope and not declaring a name, this tag should shadow
     the previous declaration of a different type of tag, and this
     would not work properly if we return the reference found.  (For
     example, with "struct foo" in an outer scope, "union foo;" must
     shadow that tag with a new one of union type.)  */
  ret.kind = (ref
	      ? (have_std_attrs ? ctsk_tagref_attrs : ctsk_tagref)
	      : (have_std_attrs ? ctsk_tagfirstref_attrs : ctsk_tagfirstref));
  if (ref && TREE_CODE (ref) == code)
    {
      decl_attributes (&ref, attrs, (int) ATTR_FLAG_TYPE_IN_PLACE);
      if (C_TYPE_DEFINED_IN_STRUCT (ref)
	  && loc != UNKNOWN_LOCATION
	  && warn_cxx_compat)
	{
	  switch (code)
	    {
	    case ENUMERAL_TYPE:
	      warning_at (loc, OPT_Wc___compat,
			  ("enum type defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "enum type defined here");
	      break;
	    case RECORD_TYPE:
	      warning_at (loc, OPT_Wc___compat,
			  ("struct defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "struct defined here");
	      break;
	    case UNION_TYPE:
	      warning_at (loc, OPT_Wc___compat,
			  ("union defined in struct or union "
			   "is not visible in C++"));
	      inform (refloc, "union defined here");
	      break;
	    default:
	      gcc_unreachable();
	    }
	}

      ret.spec = ref;
      return ret;
    }

  /* If no such tag is yet defined, create a forward-reference node
     and record it as the "definition".
     When a real declaration of this type is found,
     the forward-reference will be altered into a real type.  */

  ref = make_node (code);
  if (code == ENUMERAL_TYPE)
    {
      /* Give the type a default layout like unsigned int
	 to avoid crashing if it does not get defined.  */
      SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node));
      SET_TYPE_ALIGN (ref, TYPE_ALIGN (unsigned_type_node));
      TYPE_USER_ALIGN (ref) = 0;
      TYPE_UNSIGNED (ref) = 1;
      TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
      TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
      TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
    }

  pushtag (loc, name, ref);
  decl_attributes (&ref, attrs, (int) ATTR_FLAG_TYPE_IN_PLACE);

  ret.spec = ref;
  return ret;
}

/* Get the struct, enum or union (CODE says which) with tag NAME.
   Define the tag as a forward-reference if it is not defined.
   Return a tree for the type.  */

tree
xref_tag (enum tree_code code, tree name)
{
  return parser_xref_tag (input_location, code, name, false, NULL_TREE).spec;
}

/* Make sure that the tag NAME is defined *in the current scope*
   at least as a forward reference.
   LOC is the location of the struct's definition.
   CODE says which kind of tag NAME ought to be.

   This stores the current value of the file static STRUCT_PARSE_INFO
   in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a
   new c_struct_parse_info structure.  The old value of
   STRUCT_PARSE_INFO is restored in finish_struct.  */

tree
start_struct (location_t loc, enum tree_code code, tree name,
	      class c_struct_parse_info **enclosing_struct_parse_info)
{
  /* If there is already a tag defined at this scope
     (as a forward reference), just return it.  */

  tree ref = NULL_TREE;
  location_t refloc = UNKNOWN_LOCATION;

  if (name != NULL_TREE)
    ref = lookup_tag (code, name, true, &refloc);
  if (ref && TREE_CODE (ref) == code)
    {
      if (TYPE_STUB_DECL (ref))
	refloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (ref));

      if (TYPE_SIZE (ref))
	{
	  if (code == UNION_TYPE)
	    error_at (loc, "redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "redefinition of %<struct %E%>", name);
	  if (refloc != UNKNOWN_LOCATION)
	    inform (refloc, "originally defined here");
	  /* Don't create structures using a name already in use.  */
	  ref = NULL_TREE;
	}
      else if (C_TYPE_BEING_DEFINED (ref))
	{
	  if (code == UNION_TYPE)
	    error_at (loc, "nested redefinition of %<union %E%>", name);
	  else
	    error_at (loc, "nested redefinition of %<struct %E%>", name);
	  /* Don't bother to report "originally defined here" for a
	     nested redefinition; the original definition should be
	     obvious.  */
	  /* Don't create structures that contain themselves.  */
	  ref = NULL_TREE;
	}
    }

  /* Otherwise create a forward-reference just so the tag is in scope.  */

  if (ref == NULL_TREE || TREE_CODE (ref) != code)
    {
      ref = make_node (code);
      pushtag (loc, name, ref);
    }

  C_TYPE_BEING_DEFINED (ref) = 1;
  for (tree v = TYPE_MAIN_VARIANT (ref); v; v = TYPE_NEXT_VARIANT (v))
    TYPE_PACKED (v) = flag_pack_struct;

  *enclosing_struct_parse_info = struct_parse_info;
  struct_parse_info = new c_struct_parse_info ();

  /* FIXME: This will issue a warning for a use of a type defined
     within a statement expr used within sizeof, et. al.  This is not
     terribly serious as C++ doesn't permit statement exprs within
     sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));

  return ref;
}

/* Process the specs, declarator and width (NULL if omitted)
   of a structure component, returning a FIELD_DECL node.
   WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node.
   DECL_ATTRS is as for grokdeclarator.

   LOC is the location of the structure component.

   This is done during the parsing of the struct declaration.
   The FIELD_DECL nodes are chained together and the lot of them
   are ultimately passed to `build_struct' to make the RECORD_TYPE node.  */

tree
grokfield (location_t loc, struct c_declarator *declarator,
	   struct c_declspecs *declspecs, tree width, tree *decl_attrs)
{
  tree value;

  if (declarator->kind == cdk_id && declarator->u.id.id == NULL_TREE
      && width == NULL_TREE)
    {
      /* This is an unnamed decl.

	 If we have something of the form "union { list } ;" then this
	 is the anonymous union extension.  Similarly for struct.

	 If this is something of the form "struct foo;", then
	   If MS or Plan 9 extensions are enabled, this is handled as
	     an anonymous struct.
	   Otherwise this is a forward declaration of a structure tag.

	 If this is something of the form "foo;" and foo is a TYPE_DECL, then
	   If foo names a structure or union without a tag, then this
	     is an anonymous struct (this is permitted by C11).
	   If MS or Plan 9 extensions are enabled and foo names a
	     structure, then again this is an anonymous struct.
	   Otherwise this is an error.

	 Oh what a horrid tangled web we weave.  I wonder if MS consciously
	 took this from Plan 9 or if it was an accident of implementation
	 that took root before someone noticed the bug...  */

      tree type = declspecs->type;
      bool ok = false;

      if (RECORD_OR_UNION_TYPE_P (type)
	  && (flag_ms_extensions
	      || flag_plan9_extensions
	      || !declspecs->typedef_p))
	{
	  if (flag_ms_extensions || flag_plan9_extensions)
	    ok = true;
	  else if (TYPE_NAME (type) == NULL)
	    ok = true;
	  else
	    ok = false;
	}
      if (!ok)
	{
	  pedwarn (loc, 0, "declaration does not declare anything");
	  return NULL_TREE;
	}
      if (flag_isoc99)
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C99 doesn%'t support unnamed structs/unions");
      else
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C90 doesn%'t support unnamed structs/unions");
    }

  value = grokdeclarator (declarator, declspecs, FIELD, false,
			  width ? &width : NULL, decl_attrs, NULL, NULL,
			  DEPRECATED_NORMAL);

  finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE);
  DECL_INITIAL (value) = width;
  if (width)
    SET_DECL_C_BIT_FIELD (value);

  if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE)
    {
      /* If we currently have a binding for this field, set the
	 in_struct field in the binding, so that we warn about lookups
	 which find it.  */
      struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value));
      if (b != NULL)
	{
	  /* If the in_struct field is not yet set, push it on a list
	     to be cleared when this struct is finished.  */
	  if (!b->in_struct)
	    {
	      struct_parse_info->fields.safe_push (b);
	      b->in_struct = 1;
	    }
	}
    }

  return value;
}

/* Subroutine of detect_field_duplicates: return whether X and Y,
   which are both fields in the same struct, have duplicate field
   names.  */

static bool
is_duplicate_field (tree x, tree y)
{
  if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y))
    return true;

  /* When using -fplan9-extensions, an anonymous field whose name is a
     typedef can duplicate a field name.  */
  if (flag_plan9_extensions
      && (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE))
    {
      tree xt, xn, yt, yn;

      /* For an anonymous field, use the name of the TYPE_DECL of its
	 struct/union type (if any) as the effective field name.  */
      xt = TREE_TYPE (x);
      if (DECL_NAME (x) != NULL_TREE)
	xn = DECL_NAME (x);
      else if (RECORD_OR_UNION_TYPE_P (xt)
	       && TYPE_NAME (xt) != NULL_TREE
	       && TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL)
	xn = DECL_NAME (TYPE_NAME (xt));
      else
	xn = NULL_TREE;

      yt = TREE_TYPE (y);
      if (DECL_NAME (y) != NULL_TREE)
	yn = DECL_NAME (y);
      else if (RECORD_OR_UNION_TYPE_P (yt)
	       && TYPE_NAME (yt) != NULL_TREE
	       && TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL)
	yn = DECL_NAME (TYPE_NAME (yt));
      else
	yn = NULL_TREE;

      if (xn != NULL_TREE && xn == yn)
	return true;
    }

  return false;
}

/* Subroutine of detect_field_duplicates: add the fields of FIELDLIST
   to HTAB, giving errors for any duplicates.
 */

static void
detect_field_duplicates_hash (tree fieldlist,
			      hash_table<nofree_ptr_hash <tree_node> > *htab)
{
  tree x, y;
  tree_node **slot;

  for (x = fieldlist; x ; x = DECL_CHAIN (x))
    if ((y = DECL_NAME (x)) != NULL_TREE)
      {
	slot = htab->find_slot (y, INSERT);
	if (*slot)
	  {
	    error ("duplicate member %q+D", x);
	    /* Anonymize the duplicate so later stages do not trip
	       over it again.  */
	    DECL_NAME (x) = NULL_TREE;
	  }
	*slot = y;
      }
    else if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
      {
	/* Anonymous struct/union: its members live in the enclosing
	   struct's namespace, so recurse into them.  */
	detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab);

	/* When using -fplan9-extensions, an anonymous field whose
	   name is a typedef can duplicate a field name.  */
	if (flag_plan9_extensions
	    && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
	    && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)
	  {
	    tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x)));
	    slot = htab->find_slot (xn, INSERT);
	    if (*slot)
	      error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x)));
	    *slot = xn;
	  }
      }
}

/* Generate an error for any duplicate field names in FIELDLIST.  Munge
   the list such that this does not present a problem later.  */

static void
detect_field_duplicates (tree fieldlist)
{
  tree x, y;
  int timeout = 10;

  /* If the struct is the list of instance variables of an Objective-C
     class, then we need to check all the instance variables of
     superclasses when checking for duplicates (since you can't have
     an instance variable in a subclass with the same name as an
     instance variable in a superclass).  We pass on this job to the
     Objective-C compiler.  objc_detect_field_duplicates() will return
     false if we are not checking the list of instance variables and
     the C frontend should proceed with the standard field duplicate
     checks.  If we are checking the list of instance variables, the
     ObjC frontend will do the check, emit the errors if needed, and
     then return true.  */
  if (c_dialect_objc ())
    if (objc_detect_field_duplicates (false))
      return;

  /* First, see if there are more than "a few" fields.
     This is trivially true if there are zero or one fields.  */
  if (!fieldlist || !DECL_CHAIN (fieldlist))
    return;
  x = fieldlist;
  do
    {
      timeout--;
      /* An anonymous struct/union member forces the hash-table path
	 (timeout = 0), since its fields must be checked recursively.  */
      if (DECL_NAME (x) == NULL_TREE
	  && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	timeout = 0;
      x = DECL_CHAIN (x);
    }
  while (timeout > 0 && x);

  /* If there were "few" fields and no anonymous structures or unions,
     avoid the overhead of allocating a hash table.  Instead just do
     the nested traversal thing.  */
  if (timeout > 0)
    {
      for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x))
	/* When using -fplan9-extensions, we can have duplicates
	   between typedef names and fields.  */
	if (DECL_NAME (x)
	    || (flag_plan9_extensions
		&& DECL_NAME (x) == NULL_TREE
		&& RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))
		&& TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
		&& TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL))
	  {
	    /* Quadratic scan is fine here: at most ~10 fields.  */
	    for (y = fieldlist; y != x; y = TREE_CHAIN (y))
	      if (is_duplicate_field (y, x))
		{
		  error ("duplicate member %q+D", x);
		  DECL_NAME (x) = NULL_TREE;
		}
	  }
    }
  else
    {
      /* 37 is merely the initial size hint for the table.  */
      hash_table<nofree_ptr_hash <tree_node> > htab (37);

      detect_field_duplicates_hash (fieldlist, &htab);
    }
}

/* Finish up struct info used by -Wc++-compat.  */

static void
warn_cxx_compat_finish_struct (tree fieldlist, enum tree_code code,
			       location_t record_loc)
{
  unsigned int ix;
  tree x;
  struct c_binding *b;

  if (fieldlist == NULL_TREE)
    {
      if (code == RECORD_TYPE)
	warning_at (record_loc, OPT_Wc___compat,
		    "empty struct has size 0 in C, size 1 in C++");
      else
	warning_at (record_loc, OPT_Wc___compat,
		    "empty union has size 0 in C, size 1 in C++");
    }

  /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in
     the current struct.  We do this now at the end of the struct
     because the flag is used to issue visibility warnings, and we
     only want to issue those warnings if the type is referenced
     outside of the struct declaration.  */
  FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x)
    C_TYPE_DEFINED_IN_STRUCT (x) = 1;

  /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of
     typedefs used when declaring fields in this struct.
     If the name of any of the fields is also a typedef name then the
     struct would not parse in C++, because the C++ lookup rules say
     that the typedef name would be looked up in the context of the
     struct, and would thus be the field rather than the typedef.  */
  if (!struct_parse_info->typedefs_seen.is_empty ()
      && fieldlist != NULL_TREE)
    {
      /* Use a hash_set<tree> using the name of the typedef.  We can
	 use a hash_set<tree> because identifiers are interned.  */
      hash_set<tree> tset;

      FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x)
	tset.add (DECL_NAME (x));

      for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x))
	{
	  if (DECL_NAME (x) != NULL_TREE
	      && tset.contains (DECL_NAME (x)))
	    {
	      warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat,
			  ("using %qD as both field and typedef name is "
			   "invalid in C++"),
			  x);
	      /* FIXME: It would be nice to report the location where
		 the typedef name is used.  */
	    }
	}
    }

  /* For each field which has a binding and which was not defined in
     an enclosing struct, clear the in_struct field.  */
  FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b)
    b->in_struct = 0;
}

/* Function to help qsort sort FIELD_DECLs by name order.  */

static int
field_decl_cmp (const void *x_p, const void *y_p)
{
  const tree *const x = (const tree *) x_p;
  const tree *const y = (const tree *) y_p;

  if (DECL_NAME (*x) == DECL_NAME (*y))
    /* A nontype is "greater" than a type.  */
    return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
  if (DECL_NAME (*x) == NULL_TREE)
    return -1;
  if (DECL_NAME (*y) == NULL_TREE)
    return 1;
  /* Identifiers are interned, so ordering by pointer value gives a
     stable (if arbitrary) total order suitable for binary search.  */
  if (DECL_NAME (*x) < DECL_NAME (*y))
    return -1;
  return 1;
}

/* If this structure or union completes the type of any previous
   variable declaration, lay it out and output its rtl.
 */

static void
finish_incomplete_vars (tree incomplete_vars, bool toplevel)
{
  /* INCOMPLETE_VARS is a TREE_LIST; each TREE_VALUE is a decl whose
     type was incomplete until now.  */
  for (tree x = incomplete_vars; x; x = TREE_CHAIN (x))
    {
      tree decl = TREE_VALUE (x);
      if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	layout_array_type (TREE_TYPE (decl));
      if (TREE_CODE (decl) != TYPE_DECL)
	{
	  relayout_decl (decl);
	  if (c_dialect_objc ())
	    objc_check_decl (decl);
	  rest_of_decl_compilation (decl, toplevel, 0);
	}
    }
}

/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
   LOC is the location of the RECORD_TYPE or UNION_TYPE's definition.
   FIELDLIST is a chain of FIELD_DECL nodes for the fields.
   ATTRIBUTES are attributes to be applied to the structure.

   ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when
   the struct was started.  */

tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
	       class c_struct_parse_info *enclosing_struct_parse_info)
{
  tree x;
  bool toplevel = file_scope == current_scope;

  /* If this type was previously laid out as a forward reference,
     make sure we lay it out again.  */

  TYPE_SIZE (t) = NULL_TREE;

  decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);

  if (pedantic)
    {
      /* Look for at least one named member (or, for C11, an anonymous
	 struct/union member, which counts).  */
      for (x = fieldlist; x; x = DECL_CHAIN (x))
	{
	  if (DECL_NAME (x) != NULL_TREE)
	    break;
	  if (flag_isoc11 && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	    break;
	}

      if (x == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "union has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "union has no members");
	    }
	  else
	    {
	      if (fieldlist)
		pedwarn (loc, OPT_Wpedantic, "struct has no named members");
	      else
		pedwarn (loc, OPT_Wpedantic, "struct has no members");
	    }
	}
    }

  /* Install struct as DECL_CONTEXT of each field decl.
     Also process specified field sizes, found in the DECL_INITIAL,
     storing 0 there after the type has been changed to precision equal
     to its width, rather than the precision of the specified standard
     type.  (Correct layout requires the original type to have been preserved
     until now.)  */
  bool saw_named_field = false;
  for (x = fieldlist; x; x = DECL_CHAIN (x))
    {
      if (TREE_TYPE (x) == error_mark_node)
	continue;

      DECL_CONTEXT (x) = t;

      /* If any field is const, the structure type is pseudo-const.  */
      if (TREE_READONLY (x))
	C_TYPE_FIELDS_READONLY (t) = 1;
      else
	{
	  /* A field that is pseudo-const makes the structure likewise.  */
	  tree t1 = strip_array_types (TREE_TYPE (x));
	  if (RECORD_OR_UNION_TYPE_P (t1) && C_TYPE_FIELDS_READONLY (t1))
	    C_TYPE_FIELDS_READONLY (t) = 1;
	}

      /* Any field that is volatile means variables of this type must be
	 treated in some ways as volatile.  */
      if (TREE_THIS_VOLATILE (x))
	C_TYPE_FIELDS_VOLATILE (t) = 1;

      /* Any field of nominal variable size implies structure is too.  */
      if (C_DECL_VARIABLE_SIZE (x))
	C_TYPE_VARIABLE_SIZE (t) = 1;

      /* Bit-field width was parked in DECL_INITIAL by the parser;
	 turn it into the field's DECL_SIZE now.  */
      if (DECL_C_BIT_FIELD (x))
	{
	  unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x));
	  DECL_SIZE (x) = bitsize_int (width);
	  DECL_BIT_FIELD (x) = 1;
	}

      if (TYPE_PACKED (t)
	  && (DECL_BIT_FIELD (x)
	      || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT))
	DECL_PACKED (x) = 1;

      /* Detect flexible array member in an invalid context.  */
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
	  && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
	{
	  if (TREE_CODE (t) == UNION_TYPE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in union");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (DECL_CHAIN (x) != NULL_TREE)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member not at end of struct");
	      TREE_TYPE (x) = error_mark_node;
	    }
	  else if (!saw_named_field)
	    {
	      error_at (DECL_SOURCE_LOCATION (x),
			"flexible array member in a struct with no named "
			"members");
	      TREE_TYPE (x) = error_mark_node;
	    }
	}

      if (pedantic && TREE_CODE (t) == RECORD_TYPE
	  && flexible_array_type_p (TREE_TYPE (x)))
	pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic,
		 "invalid use of structure with flexible array member");

      if (DECL_NAME (x)
	  || RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)))
	saw_named_field = true;
    }

  detect_field_duplicates (fieldlist);

  /* Now we have the nearly final fieldlist.  Record it,
     then lay out the structure or union (including the fields).  */

  TYPE_FIELDS (t) = fieldlist;

  maybe_apply_pragma_scalar_storage_order (t);

  layout_type (t);

  if (TYPE_SIZE_UNIT (t)
      && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST
      && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t))
      && !valid_constant_size_p (TYPE_SIZE_UNIT (t)))
    error ("type %qT is too large", t);

  /* Give bit-fields their proper types and rewrite the type of array fields
     with scalar component if the enclosing type has reverse storage order.  */
  for (tree field = fieldlist; field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL
	  && DECL_INITIAL (field)
	  && TREE_TYPE (field) != error_mark_node)
	{
	  unsigned HOST_WIDE_INT width
	    = tree_to_uhwi (DECL_INITIAL (field));
	  tree type = TREE_TYPE (field);
	  if (width != TYPE_PRECISION (type))
	    {
	      TREE_TYPE (field)
		= c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type));
	      SET_DECL_MODE (field, TYPE_MODE (TREE_TYPE (field)));
	    }
	  /* Width has served its purpose; clear it per the contract
	     described above.  */
	  DECL_INITIAL (field) = NULL_TREE;
	}
      else if (TYPE_REVERSE_STORAGE_ORDER (t)
	       && TREE_CODE (field) == FIELD_DECL
	       && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE)
	{
	  tree ftype = TREE_TYPE (field);
	  tree ctype = strip_array_types (ftype);
	  if (!RECORD_OR_UNION_TYPE_P (ctype) && TYPE_MODE (ctype) != QImode)
	    {
	      /* Copy each level of the (possibly multi-dimensional)
		 array type and tag it reverse-storage-order.  */
	      tree fmain_type = TYPE_MAIN_VARIANT (ftype);
	      tree *typep = &fmain_type;
	      do {
		*typep = build_distinct_type_copy (*typep);
		TYPE_REVERSE_STORAGE_ORDER (*typep) = 1;
		typep = &TREE_TYPE (*typep);
	      } while (TREE_CODE (*typep) == ARRAY_TYPE);
	      TREE_TYPE (field)
		= c_build_qualified_type (fmain_type, TYPE_QUALS (ftype));
	    }
	}
    }

  /* Now we have the truly final field list.
     Store it in this type and in the variants.  */

  TYPE_FIELDS (t) = fieldlist;

  /* If there are lots of fields, sort so we can look through them fast.
     We arbitrarily consider 16 or more elts to be "a lot".  */

  {
    int len = 0;

    for (x = fieldlist; x; x = DECL_CHAIN (x))
      {
	if (len > 15 || DECL_NAME (x) == NULL)
	  break;
	len += 1;
      }

    if (len > 15)
      {
	tree *field_array;
	struct lang_type *space;
	struct sorted_fields_type *space2;

	len += list_length (x);

	/* Use the same allocation policy here that make_node uses, to
	  ensure that this lives as long as the rest of the struct decl.
	  All decls in an inline function need to be saved.  */
	space = ggc_cleared_alloc<struct lang_type> ();
	space2 = (sorted_fields_type *) ggc_internal_alloc
	  (sizeof (struct sorted_fields_type) + len * sizeof (tree));

	len = 0;
	space->s = space2;
	field_array = &space2->elts[0];
	for (x = fieldlist; x; x = DECL_CHAIN (x))
	  {
	    field_array[len++] = x;

	    /* If there is anonymous struct or union, break out of the loop.  */
	    if (DECL_NAME (x) == NULL)
	      break;
	  }
	/* Found no anonymous struct/union.  Add the TYPE_LANG_SPECIFIC.  */
	if (x == NULL)
	  {
	    TYPE_LANG_SPECIFIC (t) = space;
	    TYPE_LANG_SPECIFIC (t)->s->len = len;
	    field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
	    qsort (field_array, len, sizeof (tree), field_decl_cmp);
	  }
      }
  }

  /* If this was supposed to be a transparent union, but we can't
     make it one, warn and turn off the flag.  */
  if (TREE_CODE (t) == UNION_TYPE
      && TYPE_TRANSPARENT_AGGR (t)
      && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))))
    {
      TYPE_TRANSPARENT_AGGR (t) = 0;
      warning_at (loc, 0, "union cannot be made transparent");
    }

  /* Propagate the now-complete field information to every variant of
     this type.  */
  tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t));
  for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
    {
      TYPE_FIELDS (x) = TYPE_FIELDS (t);
      TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
      TYPE_TRANSPARENT_AGGR (x) = TYPE_TRANSPARENT_AGGR (t);
      C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t);
      C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t);
      C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t);
      C_TYPE_INCOMPLETE_VARS (x) = NULL_TREE;
    }

  /* Update type location to the one of the definition, instead of
     e.g. a forward declaration.  */
  if (TYPE_STUB_DECL (t))
    DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc;

  /* Finish debugging output for this type.  */
  rest_of_type_compilation (t, toplevel);

  finish_incomplete_vars (incomplete_vars, toplevel);

  /* If we're inside a function proper, i.e. not file-scope and not still
     parsing parameters, then arrange for the size of a variable sized type
     to be bound now.  */
  if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE))
    add_stmt (build_stmt (loc,
			  DECL_EXPR,
			  build_decl (loc, TYPE_DECL, NULL, t)));

  if (warn_cxx_compat)
    warn_cxx_compat_finish_struct (fieldlist, TREE_CODE (t), loc);

  delete struct_parse_info;

  /* Pop back to the enclosing struct's parse info (if any).  */
  struct_parse_info = enclosing_struct_parse_info;

  /* If this struct is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (t);

  return t;
}

/* Communication channel between resort_sorted_fields and the qsort
   comparator resort_field_decl_cmp below.  */

static struct {
  gt_pointer_operator new_value;
  void *cookie;
} resort_data;

/* This routine compares two fields like field_decl_cmp but using the
pointer operator in resort_data.  */

static int
resort_field_decl_cmp (const void *x_p, const void *y_p)
{
  const tree *const x = (const tree *) x_p;
  const tree *const y = (const tree *) y_p;

  if (DECL_NAME (*x) == DECL_NAME (*y))
    /* A nontype is "greater" than a type.  */
    return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL);
  if (DECL_NAME (*x) == NULL_TREE)
    return -1;
  if (DECL_NAME (*y) == NULL_TREE)
    return 1;
  {
    /* Compare the identifiers at their post-relocation addresses.  */
    tree d1 = DECL_NAME (*x);
    tree d2 = DECL_NAME (*y);
    resort_data.new_value (&d1, resort_data.cookie);
    resort_data.new_value (&d2, resort_data.cookie);
    if (d1 < d2)
      return -1;
  }
  return 1;
}

/* Resort DECL_SORTED_FIELDS because pointers have been reordered.  */

void
resort_sorted_fields (void *obj,
		      void * ARG_UNUSED (orig_obj),
		      gt_pointer_operator new_value,
		      void *cookie)
{
  struct sorted_fields_type *sf = (struct sorted_fields_type *) obj;
  resort_data.new_value = new_value;
  resort_data.cookie = cookie;
  qsort (&sf->elts[0], sf->len, sizeof (tree),
	 resort_field_decl_cmp);
}

/* Lay out the type T, and its element type, and so on.  */

static void
layout_array_type (tree t)
{
  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
    layout_array_type (TREE_TYPE (t));
  layout_type (t);
}

/* Begin compiling the definition of an enumeration type.
   NAME is its name (or null if anonymous).
   LOC is the enum's location.
   Returns the type object, as yet incomplete.
   Also records info about it so that build_enumerator
   may be used to declare the individual values as they are read.  */

tree
start_enum (location_t loc, struct c_enum_contents *the_enum, tree name)
{
  tree enumtype = NULL_TREE;
  location_t enumloc = UNKNOWN_LOCATION;

  /* If this is the real definition for a previous forward reference,
     fill in the contents in the same object that used to be the
     forward reference.  */

  if (name != NULL_TREE)
    enumtype = lookup_tag (ENUMERAL_TYPE, name, true, &enumloc);

  if (enumtype == NULL_TREE || TREE_CODE (enumtype) != ENUMERAL_TYPE)
    {
      enumtype = make_node (ENUMERAL_TYPE);
      pushtag (loc, name, enumtype);
    }
  /* Update type location to the one of the definition, instead of
     e.g. a forward declaration.  */
  else if (TYPE_STUB_DECL (enumtype))
    {
      enumloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype));
      DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc;
    }

  if (C_TYPE_BEING_DEFINED (enumtype))
    error_at (loc, "nested redefinition of %<enum %E%>", name);

  C_TYPE_BEING_DEFINED (enumtype) = 1;

  if (TYPE_VALUES (enumtype) != NULL_TREE)
    {
      /* This enum is a named one that has been declared already.  */
      error_at (loc, "redeclaration of %<enum %E%>", name);
      if (enumloc != UNKNOWN_LOCATION)
	inform (enumloc, "originally defined here");

      /* Completely replace its old definition.
	 The old enumerators remain defined, however.  */
      TYPE_VALUES (enumtype) = NULL_TREE;
    }

  /* Enumerator values start at 0 unless explicitly assigned.  */
  the_enum->enum_next_value = integer_zero_node;
  the_enum->enum_overflow = 0;

  if (flag_short_enums)
    for (tree v = TYPE_MAIN_VARIANT (enumtype); v; v = TYPE_NEXT_VARIANT (v))
      TYPE_PACKED (v) = 1;

  /* FIXME: This will issue a warning for a use of a type defined
     within sizeof in a statement expr.  This is not terribly serious
     as C++ doesn't permit statement exprs within sizeof anyhow.  */
  if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
    warning_at (loc, OPT_Wc___compat,
		"defining type in %qs expression is invalid in C++",
		(in_sizeof
		 ? "sizeof"
		 : (in_typeof ? "typeof" : "alignof")));

  return enumtype;
}

/* After processing and defining all the values of an enumeration type,
   install their decls in the enumeration type and finish it off.
   ENUMTYPE is the type object, VALUES a list of decl-value pairs,
   and ATTRIBUTES are the specified attributes.
   Returns ENUMTYPE.  */

tree
finish_enum (tree enumtype, tree values, tree attributes)
{
  tree pair, tem;
  tree minnode = NULL_TREE, maxnode = NULL_TREE;
  int precision;
  signop sign;
  bool toplevel = (file_scope == current_scope);
  struct lang_type *lt;

  decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);

  /* Calculate the maximum value of any enumerator in this type.  */

  if (values == error_mark_node)
    minnode = maxnode = integer_zero_node;
  else
    {
      minnode = maxnode = TREE_VALUE (values);
      for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair))
	{
	  tree value = TREE_VALUE (pair);
	  if (tree_int_cst_lt (maxnode, value))
	    maxnode = value;
	  if (tree_int_cst_lt (value, minnode))
	    minnode = value;
	}
    }

  /* Construct the final type of this enumeration.  It is the same
     as one of the integral types - the narrowest one that fits, except
     that normally we only go as narrow as int - and signed iff any of
     the values are negative.  */
  sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED;
  precision = MAX (tree_int_cst_min_precision (minnode, sign),
		   tree_int_cst_min_precision (maxnode, sign));

  /* If the precision of the type was specified with an attribute and it
     was too small, give an error.  Otherwise, use it.  */
  if (TYPE_PRECISION (enumtype) && lookup_attribute ("mode", attributes))
    {
      if (precision > TYPE_PRECISION (enumtype))
	{
	  TYPE_PRECISION (enumtype) = 0;
	  error ("specified mode too small for enumerated values");
	}
      else
	precision = TYPE_PRECISION (enumtype);
    }
  else
    TYPE_PRECISION (enumtype) = 0;

  if (TYPE_PACKED (enumtype)
      || precision > TYPE_PRECISION (integer_type_node)
      || TYPE_PRECISION (enumtype))
    {
      tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0);
      if (tem == NULL)
	{
	  warning (0, "enumeration values exceed range of largest integer");
	  tem = long_long_integer_type_node;
	}
    }
  else
    tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node;

  /* Copy the chosen integer type's layout properties onto the enum.  */
  TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
  TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
  TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem);
  SET_TYPE_ALIGN (enumtype, TYPE_ALIGN (tem));
  TYPE_SIZE (enumtype) = NULL_TREE;
  TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem);

  layout_type (enumtype);

  if (values != error_mark_node)
    {
      /* Change the type of the enumerators to be the enum type.  We
	 need to do this irrespective of the size of the enum, for
	 proper type checking.  Replace the DECL_INITIALs of the
	 enumerators, and the value slots of the list, with copies
	 that have the enum type; they cannot be modified in place
	 because they may be shared (e.g.  integer_zero_node) Finally,
	 change the purpose slots to point to the names of the decls.  */
      for (pair = values; pair; pair = TREE_CHAIN (pair))
	{
	  tree enu = TREE_PURPOSE (pair);
	  tree ini = DECL_INITIAL (enu);

	  TREE_TYPE (enu) = enumtype;

	  /* The ISO C Standard mandates enumerators to have type int,
	     even though the underlying type of an enum type is
	     unspecified.  However, GCC allows enumerators of any
	     integer type as an extensions.  build_enumerator()
	     converts any enumerators that fit in an int to type int,
	     to avoid promotions to unsigned types when comparing
	     integers with enumerators that fit in the int range.
	     When -pedantic is given, build_enumerator() would have
	     already warned about those that don't fit.  Here we
	     convert the rest to the enumerator type.  */
	  if (TREE_TYPE (ini) != integer_type_node)
	    ini = convert (enumtype, ini);

	  DECL_INITIAL (enu) = ini;
	  TREE_PURPOSE (pair) = DECL_NAME (enu);
	  TREE_VALUE (pair) = ini;
	}

      TYPE_VALUES (enumtype) = values;
    }

  /* Record the min/max values so that we can warn about bit-field
     enumerations that are too small for the values.  */
  lt = ggc_cleared_alloc<struct lang_type> ();
  lt->enum_min = minnode;
  lt->enum_max = maxnode;
  TYPE_LANG_SPECIFIC (enumtype) = lt;

  /* Fix up all variant types of this enum type.  */
  tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (enumtype));
  for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
    {
      C_TYPE_INCOMPLETE_VARS (tem) = NULL_TREE;
      if (tem == enumtype)
	continue;
      TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
      TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
      TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
      TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
      TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
      SET_TYPE_MODE (tem, TYPE_MODE (enumtype));
      TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
      SET_TYPE_ALIGN (tem, TYPE_ALIGN (enumtype));
      TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype);
      TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype);
      TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype);
    }

  /* Finish debugging output for this type.  */
  rest_of_type_compilation (enumtype, toplevel);

  finish_incomplete_vars (incomplete_vars, toplevel);

  /* If this enum is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (enumtype);

  C_TYPE_BEING_DEFINED (enumtype) = 0;

  return enumtype;
}

/* Build and install a CONST_DECL for one value of the
   current enumeration type (one that was begun with start_enum).
   DECL_LOC is the location of the enumerator.
   LOC is the location of the '=' operator if any, DECL_LOC otherwise.
   Return a tree-list containing the CONST_DECL and its value.
   Assignment of sequential values by default is handled here.  */

tree
build_enumerator (location_t decl_loc, location_t loc,
		  struct c_enum_contents *the_enum, tree name, tree value)
{
  tree decl, type;

  /* Validate and default VALUE.  */

  if (value != NULL_TREE)
    {
      /* Don't issue more errors for error_mark_node (i.e. an
	 undeclared identifier) - just ignore the value expression.  */
      if (value == error_mark_node)
	value = NULL_TREE;
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
	{
	  error_at (loc, "enumerator value for %qE is not an integer constant",
		    name);
	  value = NULL_TREE;
	}
      else
	{
	  /* Not already a constant: try folding it down to one; if
	     folding succeeds it was merely not a *constant
	     expression*, which is only a pedwarn.  */
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      value = c_fully_fold (value, false, NULL);
	      if (TREE_CODE (value) == INTEGER_CST)
		pedwarn (loc, OPT_Wpedantic,
			 "enumerator value for %qE is not an integer "
			 "constant expression", name);
	    }
	  if (TREE_CODE (value) != INTEGER_CST)
	    {
	      error ("enumerator value for %qE is not an integer constant",
		     name);
	      value = NULL_TREE;
	    }
	  else
	    {
	      value = default_conversion (value);
	      constant_expression_warning (value);
	    }
	}
    }

  /* Default based on previous value.  */
  /* It should no longer be possible to have NON_LVALUE_EXPR
     in the default.  */
  if (value == NULL_TREE)
    {
      value = the_enum->enum_next_value;
      if (the_enum->enum_overflow)
	error_at (loc, "overflow in enumeration values");
    }
  /* Even though the underlying type of an enum is unspecified, the
     type of enumeration constants is explicitly defined as int
     (6.4.4.3/2 in the C99 Standard).  GCC allows any integer type as
     an extension.  */
  else if (!int_fits_type_p (value, integer_type_node))
    pedwarn (loc, OPT_Wpedantic,
	     "ISO C restricts enumerator values to range of %<int%>");

  /* The ISO C Standard mandates enumerators to have type int, even
     though the underlying type of an enum type is unspecified.
     However, GCC allows enumerators of any integer type as an
     extensions.
     Here we convert any enumerators that fit in an int to type int,
     to avoid promotions to unsigned types when comparing integers
     with enumerators that fit in the int range.  When -pedantic is
     given, we would have already warned about those that don't fit.
     We have to do this here rather than in finish_enum because this
     value may be used to define more enumerators.  */
  if (int_fits_type_p (value, integer_type_node))
    value = convert (integer_type_node, value);

  /* Set basis for default for next value.  */
  the_enum->enum_next_value
    = build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
		       PLUS_EXPR, value, integer_one_node, false);
  /* Wrap-around in the increment above signals overflow; diagnosed when
     the next enumerator actually uses the defaulted value.  */
  the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);

  /* Now create a declaration for the enum value name.  */

  /* Give the CONST_DECL at least int precision; unsigned only if the
     value's type is at least as wide as int and unsigned.  */
  type = TREE_TYPE (value);
  type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
				      TYPE_PRECISION (integer_type_node)),
				 (TYPE_PRECISION (type)
				  >= TYPE_PRECISION (integer_type_node)
				  && TYPE_UNSIGNED (type)));

  decl = build_decl (decl_loc, CONST_DECL, name, type);
  DECL_INITIAL (decl) = convert (type, value);
  pushdecl (decl);

  return tree_cons (decl, value, NULL_TREE);
}

/* Implement LANG_HOOKS_SIMULATE_ENUM_DECL.  */

tree
c_simulate_enum_decl (location_t loc, const char *name,
		      vec<string_int_pair> values)
{
  location_t saved_loc = input_location;
  input_location = loc;

  struct c_enum_contents the_enum;
  tree enumtype = start_enum (loc, &the_enum, get_identifier (name));

  /* Build the enumerators, chaining them in reverse; nreverse below
     restores the original order for finish_enum.  */
  tree value_chain = NULL_TREE;
  string_int_pair *value;
  unsigned int i;
  FOR_EACH_VEC_ELT (values, i, value)
    {
      tree decl = build_enumerator (loc, loc, &the_enum,
				    get_identifier (value->first),
				    build_int_cst (integer_type_node,
						   value->second));
      TREE_CHAIN (decl) = value_chain;
      value_chain = decl;
    }

  finish_enum (enumtype, nreverse (value_chain), NULL_TREE);

  input_location = saved_loc;
  return enumtype;
}

/* Create the FUNCTION_DECL for a function definition.
DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of the declaration; they describe the function's name and the type it returns, but twisted together in a fashion that parallels the syntax of C. This function creates a binding context for the function body as well as setting up the FUNCTION_DECL in current_function_decl. Returns true on success. If the DECLARATOR is not suitable for a function (it defines a datum instead), we return false to report a parse error. */ bool start_function (struct c_declspecs *declspecs, struct c_declarator *declarator, tree attributes) { tree decl1, old_decl; tree restype, resdecl; location_t loc; current_function_returns_value = 0; /* Assume, until we see it does. */ current_function_returns_null = 0; current_function_returns_abnormally = 0; warn_about_return_type = 0; c_switch_stack = NULL; /* Indicate no valid break/continue context by setting these variables to some non-null, non-label value. We'll notice and emit the proper error message in c_finish_bc_stmt. */ c_break_label = c_cont_label = size_zero_node; decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL, &attributes, NULL, NULL, DEPRECATED_NORMAL); invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1); /* If the declarator is not suitable for a function definition, cause a syntax error. */ if (decl1 == NULL_TREE || TREE_CODE (decl1) != FUNCTION_DECL) return false; loc = DECL_SOURCE_LOCATION (decl1); /* A nested function is not global. */ if (current_function_decl != NULL_TREE) TREE_PUBLIC (decl1) = 0; c_decl_attributes (&decl1, attributes, 0); if (DECL_DECLARED_INLINE_P (decl1) && DECL_UNINLINABLE (decl1) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1))) warning_at (loc, OPT_Wattributes, "inline function %qD given attribute %qs", decl1, "noinline"); /* Handle gnu_inline attribute. 
*/ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl1) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1)) || current_function_decl)) { if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1); } announce_function (decl1); if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1)))) { error_at (loc, "return type is an incomplete type"); /* Make it return void instead. */ TREE_TYPE (decl1) = build_function_type (void_type_node, TYPE_ARG_TYPES (TREE_TYPE (decl1))); } if (warn_about_return_type) warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int : (warn_return_type > 0 ? OPT_Wreturn_type : OPT_Wimplicit_int), "return type defaults to %<int%>"); /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced below (in pop_scope) with the BLOCK. */ DECL_INITIAL (decl1) = error_mark_node; /* If this definition isn't a prototype and we had a prototype declaration before, copy the arg type info from that prototype. 
*/ old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope); if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL) old_decl = NULL_TREE; current_function_prototype_locus = UNKNOWN_LOCATION; current_function_prototype_built_in = false; current_function_prototype_arg_types = NULL_TREE; if (!prototype_p (TREE_TYPE (decl1))) { if (old_decl != NULL_TREE && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (TREE_TYPE (old_decl)))) { if (stdarg_p (TREE_TYPE (old_decl))) { auto_diagnostic_group d; warning_at (loc, 0, "%q+D defined as variadic function " "without prototype", decl1); locate_old_decl (old_decl); } TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl), TREE_TYPE (decl1)); current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (old_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl1)); } if (TREE_PUBLIC (decl1)) { /* If there is an external prototype declaration of this function, record its location but do not copy information to this decl. This may be an invisible declaration (built-in or in a scope which has finished) or simply have more refined argument types than any declaration found above. */ struct c_binding *b; for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed) if (B_IN_SCOPE (b, external_scope)) break; if (b) { tree ext_decl, ext_type; ext_decl = b->decl; ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl); if (TREE_CODE (ext_type) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (ext_type))) { current_function_prototype_locus = DECL_SOURCE_LOCATION (ext_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (ext_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (ext_type); } } } } /* Optionally warn of old-fashioned def with no previous prototype. 
*/ if (warn_strict_prototypes && old_decl != error_mark_node && !prototype_p (TREE_TYPE (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl)) warning_at (loc, OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); /* Optionally warn of any global def with no previous prototype. */ else if (warn_missing_prototypes && old_decl != error_mark_node && TREE_PUBLIC (decl1) && !MAIN_NAME_P (DECL_NAME (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_prototypes, "no previous prototype for %qD", decl1); /* Optionally warn of any def with no previous prototype if the function has already been used. */ else if (warn_missing_prototypes && old_decl != NULL_TREE && old_decl != error_mark_node && TREE_USED (old_decl) && !prototype_p (TREE_TYPE (old_decl))) warning_at (loc, OPT_Wmissing_prototypes, "%qD was used with no prototype before its definition", decl1); /* Optionally warn of any global def with no previous declaration. */ else if (warn_missing_declarations && TREE_PUBLIC (decl1) && old_decl == NULL_TREE && !MAIN_NAME_P (DECL_NAME (decl1)) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_declarations, "no previous declaration for %qD", decl1); /* Optionally warn of any def with no previous declaration if the function has already been used. */ else if (warn_missing_declarations && old_decl != NULL_TREE && old_decl != error_mark_node && TREE_USED (old_decl) && C_DECL_IMPLICIT (old_decl)) warning_at (loc, OPT_Wmissing_declarations, "%qD was used with no declaration before its definition", decl1); /* This function exists in static storage. (This does not mean `static' in the C sense!) */ TREE_STATIC (decl1) = 1; /* This is the earliest point at which we might know the assembler name of the function. Thus, if it's set before this, die horribly. */ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1)); /* If #pragma weak was used, mark the decl weak now. 
*/
  /* #pragma weak only applies to declarations at file scope.  */
  if (current_scope == file_scope)
    maybe_apply_pragma_weak (decl1);

  /* Warn for unlikely, improbable, or stupid declarations of `main'.  */
  if (warn_main && MAIN_NAME_P (DECL_NAME (decl1)))
    {
      if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
	  != integer_type_node)
	pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1);
      else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1))))
	pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD",
		 decl1);

      check_main_parameter_types (decl1);

      if (!TREE_PUBLIC (decl1))
	pedwarn (loc, OPT_Wmain, "%qD is normally a non-static function",
		 decl1);
    }

  /* Record the decl so that the function name is defined.
     If we already have a decl for this name, and it is a FUNCTION_DECL,
     use the old decl.  */
  current_function_decl = pushdecl (decl1);

  /* Enter the parameter scope; declarations made from here on go into it.  */
  push_scope ();
  declare_parm_level ();

  /* Build the artificial RESULT_DECL that holds the function's return
     value; it is ignored for debug purposes.  */
  restype = TREE_TYPE (TREE_TYPE (current_function_decl));
  resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype);
  DECL_ARTIFICIAL (resdecl) = 1;
  DECL_IGNORED_P (resdecl) = 1;
  DECL_RESULT (current_function_decl) = resdecl;

  start_fname_decls ();

  return true;
}

/* Subroutine of store_parm_decls which handles new-style function
   definitions (prototype format).  The parms already have decls, so we
   need only record them as in effect and complain if any redundant
   old-style parm decls were written.  */

static void
store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  tree decl;
  c_arg_tag *tag;
  unsigned ix;

  /* Declarations between the parameter list and the body only make
     sense for old-style definitions; diagnose and discard them.  */
  if (current_scope->bindings)
    {
      error_at (DECL_SOURCE_LOCATION (fndecl),
		"old-style parameter declarations in prototyped "
		"function definition");

      /* Get rid of the old-style declarations.  */
      pop_scope ();
      push_scope ();
    }
  /* Don't issue this warning for nested functions, and don't issue this
     warning if we got here because ARG_INFO_TYPES was error_mark_node
     (this happens when a function definition has just an ellipsis in
     its parameter list).
*/
  else if (!in_system_header_at (input_location)
	   && !current_function_scope
	   && arg_info->types != error_mark_node)
    warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional,
		"traditional C rejects ISO C style function definitions");

  /* Now make all the parameter declarations visible in the function body.
     We can bypass most of the grunt work of pushdecl.  */
  for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	{
	  bind (DECL_NAME (decl), decl, current_scope,
		/*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
	  if (!TREE_USED (decl))
	    warn_if_shadowing (decl);
	}
      else
	error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted");
    }

  /* Record the parameter list in the function declaration.  */
  DECL_ARGUMENTS (fndecl) = arg_info->parms;

  /* Now make all the ancillary declarations visible, likewise.  */
  for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
	bind (DECL_NAME (decl), decl, current_scope,
	      /*invisible=*/false,
	      /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL),
	      UNKNOWN_LOCATION);
    }

  /* And all the tag declarations.  */
  FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
    if (tag->id)
      bind (tag->id, tag->type, current_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}

/* Subroutine of store_parm_decls which handles old-style function
   definitions (separate parameter list and declarations).
*/

static void
store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  struct c_binding *b;
  tree parm, decl, last;
  tree parmids = arg_info->parms;
  hash_set<tree> seen_args;

  /* In C2X old-style definitions are a pedwarn; before that only a
     -Wold-style-definition warning.  */
  if (!in_system_header_at (input_location))
    {
      if (flag_isoc2x)
	pedwarn (DECL_SOURCE_LOCATION (fndecl),
		 OPT_Wold_style_definition, "old-style function definition");
      else
	warning_at (DECL_SOURCE_LOCATION (fndecl),
		    OPT_Wold_style_definition,
		    "old-style function definition");
    }

  if (current_scope->had_vla_unspec)
    error ("%<[*]%> not allowed in other than function prototype scope");

  /* Match each formal parameter name with its declaration.  Save each
     decl in the appropriate TREE_PURPOSE slot of the parmids chain.  */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    {
      if (TREE_VALUE (parm) == NULL_TREE)
	{
	  error_at (DECL_SOURCE_LOCATION (fndecl),
		    "parameter name missing from parameter list");
	  TREE_PURPOSE (parm) = NULL_TREE;
	  continue;
	}

      b = I_SYMBOL_BINDING (TREE_VALUE (parm));
      if (b && B_IN_CURRENT_SCOPE (b))
	{
	  decl = b->decl;
	  /* Skip erroneous parameters.  */
	  if (decl == error_mark_node)
	    continue;
	  /* If we got something other than a PARM_DECL it is an error.  */
	  if (TREE_CODE (decl) != PARM_DECL)
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"%qD declared as a non-parameter", decl);
	      continue;
	    }
	  /* If the declaration is already marked, we have a duplicate
	     name.  Complain and ignore the duplicate.  */
	  else if (seen_args.contains (decl))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"multiple parameters named %qD", decl);
	      TREE_PURPOSE (parm) = NULL_TREE;
	      continue;
	    }
	  /* If the declaration says "void", complain and turn it into
	     an int.  */
	  else if (VOID_TYPE_P (TREE_TYPE (decl)))
	    {
	      error_at (DECL_SOURCE_LOCATION (decl),
			"parameter %qD declared with void type", decl);
	      TREE_TYPE (decl) = integer_type_node;
	      DECL_ARG_TYPE (decl) = integer_type_node;
	      layout_decl (decl, 0);
	    }
	  warn_if_shadowing (decl);
	}
      /* If no declaration found, default to int.
*/
      else
	{
	  /* FIXME diagnostics: This should be the location of the
	     argument, not the FNDECL.  E.g., for an old-style
	     declaration

	       int f10(v) { blah; }

	     We should use the location of the V, not the F10.
	     Unfortunately, the V is an IDENTIFIER_NODE which has no
	     location.  In the future we need locations for c_arg_info
	     entries.

	     See gcc.dg/Wshadow-3.c for an example of this problem.  */
	  decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
			     PARM_DECL, TREE_VALUE (parm), integer_type_node);
	  DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
	  pushdecl (decl);
	  warn_if_shadowing (decl);

	  if (flag_isoc99)
	    pedwarn (DECL_SOURCE_LOCATION (decl),
		     OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
		     decl);
	  else
	    warning_at (DECL_SOURCE_LOCATION (decl),
			OPT_Wmissing_parameter_type,
			"type of %qD defaults to %<int%>", decl);
	}

      TREE_PURPOSE (parm) = decl;
      seen_args.add (decl);
    }

  /* Now examine the parms chain for incomplete declarations
     and declarations with no corresponding names.  */
  for (b = current_scope->bindings; b; b = b->prev)
    {
      parm = b->decl;
      if (TREE_CODE (parm) != PARM_DECL)
	continue;

      if (TREE_TYPE (parm) != error_mark_node
	  && !COMPLETE_TYPE_P (TREE_TYPE (parm)))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "parameter %qD has incomplete type", parm);
	  TREE_TYPE (parm) = error_mark_node;
	}

      if (!seen_args.contains (parm))
	{
	  error_at (DECL_SOURCE_LOCATION (parm),
		    "declaration for parameter %qD but no such parameter",
		    parm);

	  /* Pretend the parameter was not missing.
	     This gets us to a standard state and minimizes
	     further error messages.  */
	  parmids = chainon (parmids, tree_cons (parm, 0, 0));
	}
    }

  /* Chain the declarations together in the order of the list of
     names.  Store that chain in the function decl, replacing the
     list of names.  Update the current scope to match.
*/
  DECL_ARGUMENTS (fndecl) = NULL_TREE;
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    if (TREE_PURPOSE (parm))
      break;
  if (parm && TREE_PURPOSE (parm))
    {
      last = TREE_PURPOSE (parm);
      DECL_ARGUMENTS (fndecl) = last;
      for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm))
	if (TREE_PURPOSE (parm))
	  {
	    DECL_CHAIN (last) = TREE_PURPOSE (parm);
	    last = TREE_PURPOSE (parm);
	  }
      DECL_CHAIN (last) = NULL_TREE;
    }

  /* If there was a previous prototype,
     set the DECL_ARG_TYPE of each argument according to
     the type previously specified, and report any mismatches.  */

  if (current_function_prototype_arg_types)
    {
      tree type;
      for (parm = DECL_ARGUMENTS (fndecl),
	     type = current_function_prototype_arg_types;
	   parm || (type != NULL_TREE
		    && TREE_VALUE (type) != error_mark_node
		    && TYPE_MAIN_VARIANT (TREE_VALUE (type))
		       != void_type_node);
	   parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
	{
	  /* One list ran out before the other: argument-count mismatch.  */
	  if (parm == NULL_TREE
	      || type == NULL_TREE
	      || (TREE_VALUE (type) != error_mark_node
		  && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node))
	    {
	      if (current_function_prototype_built_in)
		warning_at (DECL_SOURCE_LOCATION (fndecl),
			    0, "number of arguments doesn%'t match "
			    "built-in prototype");
	      else
		{
		  /* FIXME diagnostics: This should be the location of
		     FNDECL, but there is bug when a prototype is
		     declared inside function context, but defined
		     outside of it (e.g., gcc.dg/pr15698-2.c).  In
		     which case FNDECL gets the location of the
		     prototype, not the definition.  */
		  error_at (input_location,
			    "number of arguments doesn%'t match prototype");
		  error_at (current_function_prototype_locus,
			    "prototype declaration");
		}
	      break;
	    }
	  /* Type for passing arg must be consistent with that
	     declared for the arg.  ISO C says we take the unqualified
	     type for parameters declared with qualified type.
*/
	  if (TREE_TYPE (parm) != error_mark_node
	      && TREE_VALUE (type) != error_mark_node
	      && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   != TYPE_ATOMIC (TREE_VALUE (type)))
		  || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
				 TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
	    {
	      if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
		   == TYPE_ATOMIC (TREE_VALUE (type)))
		  && (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
		      == TYPE_MAIN_VARIANT (TREE_VALUE (type))))
		{
		  /* Adjust argument to match prototype.  E.g. a previous
		     `int foo(float);' prototype causes
		     `int foo(x) float x; {...}' to be treated like
		     `int foo(float x) {...}'.  This is particularly
		     useful for argument types like uid_t.  */
		  DECL_ARG_TYPE (parm) = TREE_TYPE (parm);

		  if (targetm.calls.promote_prototypes
			(TREE_TYPE (current_function_decl))
		      && INTEGRAL_TYPE_P (TREE_TYPE (parm))
		      && (TYPE_PRECISION (TREE_TYPE (parm))
			  < TYPE_PRECISION (integer_type_node)))
		    DECL_ARG_TYPE (parm)
		      = c_type_promotes_to (TREE_TYPE (parm));

		  /* ??? Is it possible to get here with a
		     built-in prototype or will it always have
		     been diagnosed as conflicting with an
		     old-style definition and discarded?  */
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				OPT_Wpedantic, "promoted argument %qD "
				"doesn%'t match built-in prototype", parm);
		  else
		    {
		      pedwarn (DECL_SOURCE_LOCATION (parm),
			       OPT_Wpedantic, "promoted argument %qD "
			       "doesn%'t match prototype", parm);
		      pedwarn (current_function_prototype_locus, OPT_Wpedantic,
			       "prototype declaration");
		    }
		}
	      else
		{
		  if (current_function_prototype_built_in)
		    warning_at (DECL_SOURCE_LOCATION (parm),
				0, "argument %qD doesn%'t match "
				"built-in prototype", parm);
		  else
		    {
		      error_at (DECL_SOURCE_LOCATION (parm),
				"argument %qD doesn%'t match prototype", parm);
		      error_at (current_function_prototype_locus,
				"prototype declaration");
		    }
		}
	    }
	}
      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = NULL_TREE;
    }

  /* Otherwise, create a prototype that would match.
*/
  else
    {
      tree actual = NULL_TREE, last = NULL_TREE, type;

      /* Build a TREE_LIST of the promoted argument types, terminated
	 by void_type_node.  */
      for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
	{
	  type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE);
	  if (last)
	    TREE_CHAIN (last) = type;
	  else
	    actual = type;
	  last = type;
	}
      type = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
      if (last)
	TREE_CHAIN (last) = type;
      else
	actual = type;

      /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
	 of the type of this function, but we need to avoid having this
	 affect the types of other similarly-typed functions, so we must
	 first force the generation of an identical (but separate) type
	 node for the relevant function type.  The new node we create
	 will be a variant of the main variant of the original function
	 type.  */
      TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl));

      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
    }
}

/* Store parameter declarations passed in ARG_INFO into the current
   function declaration.  */

void
store_parm_decls_from (struct c_arg_info *arg_info)
{
  current_function_arg_info = arg_info;
  store_parm_decls ();
}

/* Called by walk_tree to look for and update context-less labels
   or labels with context in the parent function.  */

static tree
set_labels_context_r (tree *tp, int *walk_subtrees, void *data)
{
  tree ctx = static_cast<tree>(data);
  if (TREE_CODE (*tp) == LABEL_EXPR
      && (DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) == NULL_TREE
	  || DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) == DECL_CONTEXT (ctx)))
    {
      DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) = ctx;
      *walk_subtrees = 0;
    }

  return NULL_TREE;
}

/* Store the parameter declarations into the current function declaration.
   This is called after parsing the parameter declarations, before
   digesting the body of the function.

   For an old-style definition, construct a prototype out of the old-style
   parameter declarations and inject it into the function's type.
*/

void
store_parm_decls (void)
{
  tree fndecl = current_function_decl;
  bool proto;

  /* The argument information block for FNDECL.  */
  struct c_arg_info *arg_info = current_function_arg_info;
  current_function_arg_info = 0;

  /* True if this definition is written with a prototype.  In C2X, an
     empty argument list was converted to (void) in grokparms; in
     older C standard versions, it does not give the function a type
     with a prototype for future calls.  */
  proto = arg_info->types != 0;

  /* Dispatch to the prototype or old-style handler.  */
  if (proto)
    store_parm_decls_newstyle (fndecl, arg_info);
  else
    store_parm_decls_oldstyle (fndecl, arg_info);

  /* The next call to push_scope will be a function body.  */

  next_is_function_body = true;

  /* Write a record describing this function definition to the
     prototypes file (if requested).  */

  gen_aux_info_record (fndecl, 1, 0, proto);

  /* Initialize the RTL code for the function.  */
  allocate_struct_function (fndecl, false);

  if (warn_unused_local_typedefs)
    cfun->language = ggc_cleared_alloc<language_function> ();

  /* Begin the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = push_stmt_list ();

  /* ??? Insert the contents of the pending sizes list into the function
     to be evaluated.  The only reason left to have this is
	void foo(int n, int array[n++])
     because we throw away the array type in favor of a pointer type, and
     thus won't naturally see the SAVE_EXPR containing the increment.  All
     other pending sizes would be handled by gimplify_parameters.  */
  if (arg_info->pending_sizes)
    {
      /* In very special circumstances, e.g. for code like
	   _Atomic int i = 5;
	   void f (int a[i += 2]) {}
	 we need to execute the atomic assignment on function entry.
	 But in this case, it is not just a straight store, it has the
	 op= form, which means that build_atomic_assign has generated
	 gotos, labels, etc.  Because at that time the function decl
	 for F has not been created yet, those labels do not have any
	 function context.  But we have the fndecl now, so update the
	 labels accordingly.  gimplify_expr would crash otherwise.
	 Or with nested functions the labels could be created with parent
	 function's context, while when the statement is emitted at the
	 start of the nested function, it needs the nested function's
	 context.  */
      walk_tree_without_duplicates (&arg_info->pending_sizes,
				    set_labels_context_r, fndecl);
      add_stmt (arg_info->pending_sizes);
    }
}

/* Store PARM_DECLs in PARMS into scope temporarily.  Used for
   c_finish_omp_declare_simd for function prototypes.  No diagnostics
   should be done.  */

void
temp_store_parm_decls (tree fndecl, tree parms)
{
  push_scope ();
  for (tree p = parms; p; p = DECL_CHAIN (p))
    {
      DECL_CONTEXT (p) = fndecl;
      if (DECL_NAME (p))
	bind (DECL_NAME (p), p, current_scope,
	      /*invisible=*/false, /*nested=*/false,
	      UNKNOWN_LOCATION);
    }
}

/* Undo what temp_store_parm_decls did.  */

void
temp_pop_parm_decls (void)
{
  /* Clear all bindings in this temporary scope, so that
     pop_scope doesn't create a BLOCK.  */
  struct c_binding *b = current_scope->bindings;
  current_scope->bindings = NULL;
  for (; b; b = free_binding_and_advance (b))
    {
      gcc_assert (TREE_CODE (b->decl) == PARM_DECL
		  || b->decl == error_mark_node);
      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
      /* Restore the binding (and any shadowed type) that this
	 parameter temporarily hid.  */
      I_SYMBOL_BINDING (b->id) = b->shadowed;
      if (b->shadowed && b->shadowed->u.type)
	TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
    }
  pop_scope ();
}

/* Finish up a function declaration and compile that function
   all the way to assembler language output.  Then free the storage
   for the function definition.

   This is called after parsing the body of the function definition.
*/

void
finish_function (location_t end_loc)
{
  tree fndecl = current_function_decl;

  if (c_dialect_objc ())
    objc_finish_function ();

  /* On targets that promote prototypes, record the promoted type of
     each integral argument narrower than int.  */
  if (TREE_CODE (fndecl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (fndecl)))
    {
      tree args = DECL_ARGUMENTS (fndecl);
      for (; args; args = DECL_CHAIN (args))
	{
	  tree type = TREE_TYPE (args);
	  if (INTEGRAL_TYPE_P (type)
	      && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
	    DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	}
    }

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
    BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;

  /* Must mark the RESULT_DECL as being in this function.  */

  if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node)
    DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* In C99 hosted mode, falling off the end of main returns 0, so
     emit the implicit return here.  */
  if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted
      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
	 == integer_type_node
      && flag_isoc99)
    {
      /* Hack.  We don't want the middle-end to warn that this return
	 is unreachable, so we mark its location as special.  Using
	 UNKNOWN_LOCATION has the problem that it gets clobbered in
	 annotate_one_with_locus.  A cleaner solution might be to
	 ensure ! should_carry_locus_p (stmt), but that needs a flag.  */
      c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE);
    }

  /* Tie off the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));

  finish_fname_decls ();

  /* Complain if there's no return statement only if option specified on
     command line.  */
  if (warn_return_type > 0
      && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
      && !current_function_returns_value
      && !current_function_returns_null
      /* Don't complain if we are no-return.  */
      && !current_function_returns_abnormally
      /* Don't complain if we are declared noreturn.  */
      && !TREE_THIS_VOLATILE (fndecl)
      /* Don't warn for main().  */
      && !MAIN_NAME_P (DECL_NAME (fndecl))
      /* Or if they didn't actually specify a return type.  */
      && !C_FUNCTION_IMPLICIT_INT (fndecl)
      /* Normally, with -Wreturn-type, flow will complain, but we might
	 optimize out static functions.  */
      && !TREE_PUBLIC (fndecl)
      && targetm.warn_func_return (fndecl)
      && warning (OPT_Wreturn_type,
		  "no return statement in function returning non-void"))
    TREE_NO_WARNING (fndecl) = 1;

  /* Complain about parameters that are only set, but never otherwise
     used.  */
  if (warn_unused_but_set_parameter)
    {
      tree decl;

      for (decl = DECL_ARGUMENTS (fndecl);
	   decl;
	   decl = DECL_CHAIN (decl))
	if (TREE_USED (decl)
	    && TREE_CODE (decl) == PARM_DECL
	    && !DECL_READ_P (decl)
	    && DECL_NAME (decl)
	    && !DECL_ARTIFICIAL (decl)
	    && !TREE_NO_WARNING (decl))
	  warning_at (DECL_SOURCE_LOCATION (decl),
		      OPT_Wunused_but_set_parameter,
		      "parameter %qD set but not used", decl);
    }

  /* Complain about locally defined typedefs that are not used in this
     function.  */
  maybe_warn_unused_local_typedefs ();

  /* Possibly warn about unused parameters.  */
  if (warn_unused_parameter)
    do_warn_unused_parameter (fndecl);

  /* Store the end of the function, so that we get good line number
     info for the epilogue.  */
  cfun->function_end_locus = end_loc;

  /* Finalize the ELF visibility for the function.  */
  c_determine_visibility (fndecl);

  /* For GNU C extern inline functions disregard inline limits.  */
  if (DECL_EXTERNAL (fndecl)
      && DECL_DECLARED_INLINE_P (fndecl)
      && (flag_gnu89_inline
	  || lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (fndecl))))
    DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;

  /* Genericize before inlining.  Delay genericizing nested functions
     until their parent function is genericized.  Since finalizing
     requires GENERIC, delay that as well.  */

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node
      && !undef_nested_function)
    {
      if (!decl_function_context (fndecl))
	{
	  invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);
	  c_genericize (fndecl);

	  /* ??? Objc emits functions after finalizing the compilation unit.
	     This should be cleaned up later and this conditional removed.  */
	  if (symtab->global_info_ready)
	    {
	      cgraph_node::add_new_function (fndecl, false);
	      return;
	    }
	  cgraph_node::finalize_function (fndecl, false);
	}
      else
	{
	  /* Register this function with cgraph just far enough to get it
	     added to our parent's nested function list.  Handy, since the
	     C front end doesn't have such a list.  */
	  (void) cgraph_node::get_create (fndecl);
	}
    }

  if (!decl_function_context (fndecl))
    undef_nested_function = false;

  if (cfun->language != NULL)
    {
      ggc_free (cfun->language);
      cfun->language = NULL;
    }

  /* We're leaving the context of this function, so zap cfun.
     It's still in DECL_STRUCT_FUNCTION, and we'll restore it in
     tree_rest_of_compilation.  */
  set_cfun (NULL);
  invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION,
			   current_function_decl);
  current_function_decl = NULL;
}

/* Check the declarations given in a for-loop for satisfying the C99
   constraints.  If exactly one such decl is found, return it.  LOC is
   the location of the opening parenthesis of the for loop.  The last
   parameter allows you to control the "for loop initial declarations
   are only allowed in C99 mode".  Normally, you should pass
   flag_isoc99 as that parameter.  But in some cases (Objective-C
   foreach loop, for example) we want to run the checks in this
   function even if not in C99 mode, so we allow the caller to turn
   off the error about not being in C99 mode.  */

tree
check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error)
{
  struct c_binding *b;
  tree one_decl = NULL_TREE;
  int n_decls = 0;

  if (!turn_off_iso_c99_error)
    {
      static bool hint = true;
      /* If we get here, declarations have been used in a for loop without
	 the C99 for loop scope.  This doesn't make much sense, so don't
	 allow it.
*/
      error_at (loc, "%<for%> loop initial declarations "
		"are only allowed in C99 or C11 mode");
      if (hint)
	{
	  inform (loc,
		  "use option %<-std=c99%>, %<-std=gnu99%>, %<-std=c11%> or "
		  "%<-std=gnu11%> to compile your code");
	  /* Emit the suggestion only once per compilation.  */
	  hint = false;
	}
      return NULL_TREE;
    }
  else
    pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<for%> loop "
		 "initial declarations");

  /* C99 subclause 6.8.5 paragraph 3:

       [#3]  The  declaration  part  of  a for statement shall only
       declare identifiers for objects having storage class auto or
       register.

     It isn't clear whether, in this sentence, "identifiers" binds to
     "shall only declare" or to "objects" - that is, whether all
     identifiers declared must be identifiers for objects, or whether
     the restriction only applies to those that are.  (A question on
     this in comp.std.c in November 2000 received no answer.)  We
     implement the strictest interpretation, to avoid creating an
     extension which later causes problems.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      tree id = b->id;
      tree decl = b->decl;

      if (!id)
	continue;

      switch (TREE_CODE (decl))
	{
	case VAR_DECL:
	  {
	    location_t decl_loc = DECL_SOURCE_LOCATION (decl);
	    if (TREE_STATIC (decl))
	      error_at (decl_loc,
			"declaration of static variable %qD in %<for%> loop "
			"initial declaration", decl);
	    else if (DECL_EXTERNAL (decl))
	      error_at (decl_loc,
			"declaration of %<extern%> variable %qD in %<for%> loop "
			"initial declaration", decl);
	  }
	  break;

	case RECORD_TYPE:
	  error_at (loc,
		    "%<struct %E%> declared in %<for%> loop initial "
		    "declaration", id);
	  break;
	case UNION_TYPE:
	  error_at (loc,
		    "%<union %E%> declared in %<for%> loop initial declaration",
		    id);
	  break;
	case ENUMERAL_TYPE:
	  error_at (loc, "%<enum %E%> declared in %<for%> loop "
		    "initial declaration", id);
	  break;
	default:
	  error_at (loc, "declaration of non-variable "
		    "%qD in %<for%> loop initial declaration", decl);
	}

      n_decls++;
      one_decl = decl;
    }

  /* Only a single declared identifier is returned.  */
  return n_decls == 1 ? one_decl : NULL_TREE;
}

/* Save and reinitialize the variables
   used during compilation of a C function.  */

void
c_push_function_context (void)
{
  struct language_function *p = cfun->language;
  /* cfun->language might have been already allocated by the use of
     -Wunused-local-typedefs.  In that case, just re-use it.  */
  if (p == NULL)
    cfun->language = p = ggc_cleared_alloc<language_function> ();

  /* Snapshot the per-function globals into P before the middle end's
     push_function_context.  */
  p->base.x_stmt_tree = c_stmt_tree;
  c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
  p->x_break_label = c_break_label;
  p->x_cont_label = c_cont_label;
  p->x_switch_stack = c_switch_stack;
  p->arg_info = current_function_arg_info;
  p->returns_value = current_function_returns_value;
  p->returns_null = current_function_returns_null;
  p->returns_abnormally = current_function_returns_abnormally;
  p->warn_about_return_type = warn_about_return_type;

  push_function_context ();
}

/* Restore the variables used during compilation of a C function.  */

void
c_pop_function_context (void)
{
  struct language_function *p;

  pop_function_context ();
  p = cfun->language;

  /* When -Wunused-local-typedefs is in effect, cfun->languages is
     used to store data throughout the life time of the current cfun,
     So don't deallocate it.  */
  if (!warn_unused_local_typedefs)
    cfun->language = NULL;

  if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
      && DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
    {
      /* Stop pointing to the local nodes about to be freed.  */
      /* But DECL_INITIAL must remain nonzero so we know this
	 was an actual function definition.
*/
      DECL_INITIAL (current_function_decl) = error_mark_node;
      DECL_ARGUMENTS (current_function_decl) = NULL_TREE;
    }

  /* Restore the per-function state saved by c_push_function_context.  */
  c_stmt_tree = p->base.x_stmt_tree;
  p->base.x_stmt_tree.x_cur_stmt_list = NULL;
  c_break_label = p->x_break_label;
  c_cont_label = p->x_cont_label;
  c_switch_stack = p->x_switch_stack;
  current_function_arg_info = p->arg_info;
  current_function_returns_value = p->returns_value;
  current_function_returns_null = p->returns_null;
  current_function_returns_abnormally = p->returns_abnormally;
  warn_about_return_type = p->warn_about_return_type;
}

/* The functions below are required for functionality of doing
   function at once processing in the C front end.  Currently these
   functions are not called from anywhere in the C front end, but as
   these changes continue, that will change.  */

/* Returns the stmt_tree (if any) to which statements are currently
   being added.  If there is no active statement-tree, NULL is
   returned.  */

stmt_tree
current_stmt_tree (void)
{
  return &c_stmt_tree;
}

/* Return the global value of T as a symbol.  */

tree
identifier_global_value (tree t)
{
  struct c_binding *b;

  /* Walk outward through shadowed bindings until one at file or
     external scope is found.  */
  for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed)
    if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
      return b->decl;

  return NULL_TREE;
}

/* Return the global value of tag T as a symbol.  */

tree
identifier_global_tag (tree t)
{
  struct c_binding *b;

  /* Same walk as identifier_global_value, but over the tag namespace.  */
  for (b = I_TAG_BINDING (t); b; b = b->shadowed)
    if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
      return b->decl;

  return NULL_TREE;
}

/* Returns true if NAME refers to a built-in function or function-like
   operator.  */

bool
names_builtin_p (const char *name)
{
  tree id = get_identifier (name);
  if (tree decl = identifier_global_value (id))
    return TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl);

  /* Also detect common reserved C words that aren't strictly built-in
     functions.
*/
  switch (C_RID_CODE (id))
    {
    case RID_BUILTIN_CONVERTVECTOR:
    case RID_BUILTIN_HAS_ATTRIBUTE:
    case RID_BUILTIN_SHUFFLE:
    case RID_CHOOSE_EXPR:
    case RID_OFFSETOF:
    case RID_TYPES_COMPATIBLE_P:
      return true;
    default:
      break;
    }

  return false;
}

/* In C, the only C-linkage public declaration is at file scope.  */

tree
c_linkage_bindings (tree name)
{
  return identifier_global_value (name);
}

/* Record a builtin type for C.  If NAME is non-NULL, it is the name used;
   otherwise the name is found in ridpointers from RID_INDEX.  */

void
record_builtin_type (enum rid rid_index, const char *name, tree type)
{
  tree id, decl;
  if (name == 0)
    id = ridpointers[(int) rid_index];
  else
    id = get_identifier (name);
  decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type);
  pushdecl (decl);
  if (debug_hooks->type_decl)
    debug_hooks->type_decl (decl, false);
}

/* Build the void_list_node (void_type_node having been created).  */

tree
build_void_list_node (void)
{
  tree t = build_tree_list (NULL_TREE, void_type_node);
  return t;
}

/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR.  */

struct c_parm *
build_c_parm (struct c_declspecs *specs, tree attrs,
	      struct c_declarator *declarator, location_t loc)
{
  /* Parser data structures are allocated on the parser obstack.  */
  struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm);
  ret->specs = specs;
  ret->attrs = attrs;
  ret->declarator = declarator;
  ret->loc = loc;
  return ret;
}

/* Return a declarator with nested attributes.  TARGET is the inner
   declarator to which these attributes apply.  ATTRS are the
   attributes.  */

struct c_declarator *
build_attrs_declarator (tree attrs, struct c_declarator *target)
{
  struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
  ret->kind = cdk_attrs;
  ret->declarator = target;
  ret->u.attrs = attrs;
  return ret;
}

/* Return a declarator for a function with arguments specified by ARGS
   and return type specified by TARGET.
*/ struct c_declarator * build_function_declarator (struct c_arg_info *args, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_function; ret->declarator = target; ret->u.arg_info = args; return ret; } /* Return a declarator for the identifier IDENT (which may be NULL_TREE for an abstract declarator). */ struct c_declarator * build_id_declarator (tree ident) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_id; ret->declarator = 0; ret->u.id.id = ident; ret->u.id.attrs = NULL_TREE; /* Default value - may get reset to a more precise location. */ ret->id_loc = input_location; return ret; } /* Return something to represent absolute declarators containing a *. TARGET is the absolute declarator that the * contains. TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes to apply to the pointer type. */ struct c_declarator * make_pointer_declarator (struct c_declspecs *type_quals_attrs, struct c_declarator *target) { tree attrs; int quals = 0; struct c_declarator *itarget = target; struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); if (type_quals_attrs) { attrs = type_quals_attrs->attrs; quals = quals_from_declspecs (type_quals_attrs); if (attrs != NULL_TREE) itarget = build_attrs_declarator (attrs, target); } ret->kind = cdk_pointer; ret->declarator = itarget; ret->u.pointer_quals = quals; return ret; } /* Return a pointer to a structure for an empty list of declaration specifiers. */ struct c_declspecs * build_null_declspecs (void) { struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs); memset (ret, 0, sizeof *ret); ret->align_log = -1; ret->typespec_word = cts_none; ret->storage_class = csc_none; ret->expr_const_operands = true; ret->typespec_kind = ctsk_none; ret->address_space = ADDR_SPACE_GENERIC; return ret; } /* Add the address space ADDRSPACE to the declaration specifiers SPECS, returning SPECS. 
*/

struct c_declspecs *
declspecs_add_addrspace (location_t location,
			 struct c_declspecs *specs, addr_space_t as)
{
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  specs->non_std_attrs_seen_p = true;

  /* Two different non-generic address spaces cannot be combined.  */
  if (!ADDR_SPACE_GENERIC_P (specs->address_space)
      && specs->address_space != as)
    error ("incompatible address space qualifiers %qs and %qs",
	   c_addr_space_name (as),
	   c_addr_space_name (specs->address_space));
  else
    {
      specs->address_space = as;
      specs->locations[cdw_address_space] = location;
    }
  return specs;
}

/* Add the type qualifier QUAL to the declaration specifiers SPECS,
   returning SPECS.  */

struct c_declspecs *
declspecs_add_qual (location_t loc,
		    struct c_declspecs *specs, tree qual)
{
  enum rid i;
  bool dupe = false;
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  specs->non_std_attrs_seen_p = true;
  gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (qual));
  i = C_RID_CODE (qual);
  location_t prev_loc = UNKNOWN_LOCATION;
  /* Record the qualifier, remembering whether it was already present
     and where the earlier occurrence was, for the duplicate
     diagnostics below.  */
  switch (i)
    {
    case RID_CONST:
      dupe = specs->const_p;
      specs->const_p = true;
      prev_loc = specs->locations[cdw_const];
      specs->locations[cdw_const] = loc;
      break;
    case RID_VOLATILE:
      dupe = specs->volatile_p;
      specs->volatile_p = true;
      prev_loc = specs->locations[cdw_volatile];
      specs->locations[cdw_volatile] = loc;
      break;
    case RID_RESTRICT:
      dupe = specs->restrict_p;
      specs->restrict_p = true;
      prev_loc = specs->locations[cdw_restrict];
      specs->locations[cdw_restrict] = loc;
      break;
    case RID_ATOMIC:
      dupe = specs->atomic_p;
      specs->atomic_p = true;
      prev_loc = specs->locations[cdw_atomic];
      specs->locations[cdw_atomic] = loc;
      break;
    default:
      gcc_unreachable ();
    }
  if (dupe)
    {
      /* A duplicate qualifier is a C90 pedwarn; failing that, warn
	 under -Wduplicate-decl-specifier, but only when neither
	 occurrence came from a macro expansion.  */
      bool warned = pedwarn_c90 (loc, OPT_Wpedantic,
				 "duplicate %qE declaration specifier", qual);
      if (!warned
	  && warn_duplicate_decl_specifier
	  && prev_loc >= RESERVED_LOCATION_COUNT
	  && !from_macro_expansion_at (prev_loc)
	  && !from_macro_expansion_at (loc))
	warning_at (loc, OPT_Wduplicate_decl_specifier,
		    "duplicate %qE declaration specifier", qual);
    }
  return
specs; }

/* Add the type specifier TYPE to the declaration specifiers SPECS, returning SPECS.  */

struct c_declspecs * declspecs_add_type (location_t loc, struct c_declspecs *specs, struct c_typespec spec) { tree type = spec.spec; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; specs->non_std_attrs_seen_p = true; specs->typespec_kind = spec.kind; if (TREE_DEPRECATED (type)) specs->deprecated_p = true;
/* NOTE: each else-if chain below reports only the first conflicting
   specifier it finds; the order of the checks determines which
   diagnostic wins, so it must not be rearranged casually.  */
/* Handle type specifier keywords. */ if (TREE_CODE (type) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (type) && C_RID_CODE (type) != RID_CXX_COMPAT_WARN) { enum rid i = C_RID_CODE (type); if (specs->type) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } if ((int) i <= (int) RID_LAST_MODIFIER) { /* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */ bool dupe = false; switch (i) { case RID_LONG: if (specs->long_long_p) { error_at (loc, "%<long long long%> is too long for GCC"); break; } if (specs->long_p) { if (specs->typespec_word == cts_double) { error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); break; } pedwarn_c90 (loc, OPT_Wlong_long, "ISO C90 does not support %<long long%>"); specs->long_long_p = 1; specs->locations[cdw_long_long] = loc; break; } if (specs->short_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<long%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<long%> and %<char%> in "
"declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<long%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<long%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<long%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<long%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->long_p = true; specs->locations[cdw_long] = loc; } break; case RID_SHORT: dupe = specs->short_p; if (specs->long_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<short%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<short%> and %<_Float%d%s%> in "
"declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<short%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<short%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<short%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->short_p = true; specs->locations[cdw_short] = loc; } break; case RID_SIGNED: dupe = specs->signed_p; if (specs->unsigned_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<signed%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ?
"x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<signed%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<signed%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<signed%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->signed_p = true; specs->locations[cdw_signed] = loc; } break; case RID_UNSIGNED: dupe = specs->unsigned_p; if (specs->signed_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<unsigned%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ?
"x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<unsigned%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<unsigned%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<unsigned%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->unsigned_p = true; specs->locations[cdw_unsigned] = loc; } break; case RID_COMPLEX: dupe = specs->complex_p; if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support complex types"); if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<complex%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<complex%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<complex%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->typespec_word == cts_fract) error_at (loc, ("both %<complex%> and %<_Fract%> in " "declaration specifiers")); else if (specs->typespec_word == cts_accum) error_at (loc, ("both %<complex%> and %<_Accum%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<complex%> and %<_Sat%> in " "declaration specifiers")); else { specs->complex_p = true; specs->locations[cdw_complex] = loc; } break; case RID_SAT: dupe = specs->saturating_p; pedwarn (loc, OPT_Wpedantic, "ISO C does not support saturating types"); if
(specs->typespec_word == cts_int_n) { error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); } else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<_Sat%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ?
"x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<_Sat%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<_Sat%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<_Sat%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<_Sat%> and %<complex%> in " "declaration specifiers")); else { specs->saturating_p = true; specs->locations[cdw_saturating] = loc; } break; default: gcc_unreachable (); } if (dupe) error_at (loc, "duplicate %qE", type); return specs; } else { /* "void", "_Bool", "char", "int", "float", "double", "_FloatN", "_FloatNx", "_Decimal32", "__intN", "_Decimal64", "_Decimal128", "_Fract", "_Accum" or "__auto_type". */ if (specs->typespec_word != cts_none) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } switch (i) { case RID_AUTO_TYPE: if (specs->long_p) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else { specs->typespec_word = cts_auto_type; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: specs->int_n_idx = i - RID_INT_N_0; if (!in_system_header_at (input_location) /* If the INT_N type ends in "__", and so is of the format "__intN__", don't pedwarn. */ && (strncmp (IDENTIFIER_POINTER (type) + (IDENTIFIER_LENGTH (type) - 2), "__", 2) != 0)) pedwarn (loc, OPT_Wpedantic, "ISO C does not support %<__int%d%> types", int_n_data[specs->int_n_idx].bitsize); if (specs->long_p) error_at (loc, ("both %<__int%d%> and %<long%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->short_p) error_at (loc, ("both %<__int%d%> and %<short%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (! int_n_enabled_p[specs->int_n_idx]) { specs->typespec_word = cts_int_n; error_at (loc, "%<__int%d%> is not supported on this target", int_n_data[specs->int_n_idx].bitsize); } else { specs->typespec_word = cts_int_n; specs->locations[cdw_typespec] = loc; } return specs; case RID_VOID: if (specs->long_p) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else { specs->typespec_word = cts_void; specs->locations[cdw_typespec] = loc; } return specs; case RID_BOOL: if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support boolean types"); if (specs->long_p) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<_Bool%> in "
"declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else { specs->typespec_word = cts_bool; specs->locations[cdw_typespec] = loc; } return specs; case RID_CHAR: if (specs->long_p) error_at (loc, ("both %<long%> and %<char%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else { specs->typespec_word = cts_char; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT: if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else { specs->typespec_word = cts_int; specs->locations[cdw_typespec] = loc; } return specs; case RID_FLOAT: if (specs->long_p) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else { specs->typespec_word = cts_float; specs->locations[cdw_typespec] = loc; } return specs; case RID_DOUBLE: if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<double%> in "
"declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else { specs->typespec_word = cts_double; specs->locations[cdw_typespec] = loc; } return specs; CASE_RID_FLOATN_NX: specs->floatn_nx_idx = i - RID_FLOATN_NX_FIRST; if (!in_system_header_at (input_location)) pedwarn (loc, OPT_Wpedantic, "ISO C does not support the %<_Float%d%s%> type", floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); if (specs->long_p) error_at (loc, ("both %<long%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ?
"x" : "")); else if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE) { specs->typespec_word = cts_floatn_nx; error_at (loc, "%<_Float%d%s%> is not supported on this target", floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); } else { specs->typespec_word = cts_floatn_nx; specs->locations[cdw_typespec] = loc; } return specs; case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: { const char *str; if (i == RID_DFLOAT32) str = "_Decimal32"; else if (i == RID_DFLOAT64) str = "_Decimal64"; else str = "_Decimal128"; if (specs->long_long_p) error_at (loc, ("both %<long long%> and %qs in " "declaration specifiers"), str); if (specs->long_p) error_at (loc, ("both %<long%> and %qs in " "declaration specifiers"), str); else if (specs->short_p) error_at (loc, ("both %<short%> and %qs in " "declaration specifiers"), str); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %qs in " "declaration specifiers"), str); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %qs in " "declaration specifiers"), str); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %qs in " "declaration specifiers"), str); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %qs in " "declaration specifiers"), str); else if (i == RID_DFLOAT32) specs->typespec_word = cts_dfloat32; else if (i == RID_DFLOAT64) specs->typespec_word = cts_dfloat64; else specs->typespec_word = cts_dfloat128; specs->locations[cdw_typespec] = loc; } if (!targetm.decimal_float_supported_p ()) error_at (loc, ("decimal floating-point not supported " "for this target")); pedwarn_c11 (loc, OPT_Wpedantic, "ISO C does not support decimal floating-point " "before C2X"); return specs; case RID_FRACT: case RID_ACCUM: { const char *str; if (i == RID_FRACT) str = "_Fract"; else str = "_Accum"; if (specs->complex_p) error_at (loc, ("both %<complex%> and %qs in " "declaration specifiers"), str); else if (i == RID_FRACT)
specs->typespec_word = cts_fract; else specs->typespec_word = cts_accum; specs->locations[cdw_typespec] = loc; } if (!targetm.fixed_point_supported_p ()) error_at (loc, "fixed-point types not supported for this target"); pedwarn (loc, OPT_Wpedantic, "ISO C does not support fixed-point types"); return specs; default: /* ObjC reserved word "id", handled below. */ break; } } } /* Now we have a typedef (a TYPE_DECL node), an identifier (some form of ObjC type, cases such as "int" and "long" being handled above), a TYPE (struct, union, enum and typeof specifiers) or an ERROR_MARK. In none of these cases may there have previously been any type specifiers. */ if (specs->type || specs->typespec_word != cts_none || specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p || specs->complex_p) error_at (loc, "two or more data types in declaration specifiers"); else if (TREE_CODE (type) == TYPE_DECL) { if (TREE_TYPE (type) == error_mark_node) ; /* Allow the type to default to int to avoid cascading errors. */ else { specs->type = TREE_TYPE (type); specs->decl_attr = DECL_ATTRIBUTES (type); specs->typedef_p = true; specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type); specs->locations[cdw_typedef] = loc; /* If this typedef name is defined in a struct, then a C++ lookup would return a different value. */ if (warn_cxx_compat && I_SYMBOL_BINDING (DECL_NAME (type))->in_struct) warning_at (loc, OPT_Wc___compat, "C++ lookup of %qD would return a field, not a type", type); /* If we are parsing a struct, record that a struct field used a typedef. */ if (warn_cxx_compat && struct_parse_info != NULL) struct_parse_info->typedefs_seen.safe_push (type); } } else if (TREE_CODE (type) == IDENTIFIER_NODE) { tree t = lookup_name (type); if (!t || TREE_CODE (t) != TYPE_DECL) error_at (loc, "%qE fails to be a typedef or built in type", type); else if (TREE_TYPE (t) == error_mark_node) ; else { specs->type = TREE_TYPE (t); specs->locations[cdw_typespec] = loc; } } else { if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof) { specs->typedef_p = true; specs->locations[cdw_typedef] = loc; if (spec.expr) { if (specs->expr) specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr), specs->expr, spec.expr); else specs->expr = spec.expr; specs->expr_const_operands &= spec.expr_const_operands; } } specs->type = type; } return specs; }

/* Add the storage class specifier or function specifier SCSPEC to the declaration specifiers SPECS, returning SPECS.  */

struct c_declspecs * declspecs_add_scspec (location_t loc, struct c_declspecs *specs, tree scspec) { enum rid i; enum c_storage_class n = csc_none; bool dupe = false; specs->declspecs_seen_p = true; specs->non_std_attrs_seen_p = true; gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (scspec)); i = C_RID_CODE (scspec); if (specs->non_sc_seen_p) warning (OPT_Wold_style_declaration, "%qE is not at beginning of declaration", scspec); switch (i) { case RID_INLINE: /* C99 permits duplicate inline. Although of doubtful utility, it seems simplest to permit it in gnu89 mode as well, as there is also little utility in maintaining this as a difference between gnu89 and C99 inline. */ dupe = false; specs->inline_p = true; specs->locations[cdw_inline] = loc; break; case RID_NORETURN: /* Duplicate _Noreturn is permitted.
*/ dupe = false; specs->noreturn_p = true; specs->locations[cdw_noreturn] = loc; break;
/* _Thread_local / __thread: invalid with auto, register or typedef;
   only the C11 spelling gets an ISO conformance pedwarn, to avoid
   noise for existing code using the GNU spelling.  */
case RID_THREAD: dupe = specs->thread_p; if (specs->storage_class == csc_auto) error ("%qE used with %<auto%>", scspec); else if (specs->storage_class == csc_register) error ("%qE used with %<register%>", scspec); else if (specs->storage_class == csc_typedef) error ("%qE used with %<typedef%>", scspec); else { specs->thread_p = true; specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec), "__thread") == 0); /* A diagnostic is not required for the use of this identifier in the implementation namespace; only diagnose it for the C11 spelling because of existing code using the other spelling. */ if (!specs->thread_gnu_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %qE", scspec); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %qE", scspec); } specs->locations[cdw_thread] = loc; } break; case RID_AUTO: n = csc_auto; break; case RID_EXTERN: n = csc_extern; /* Diagnose "__thread extern". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<extern%>"); break; case RID_REGISTER: n = csc_register; break; case RID_STATIC: n = csc_static; /* Diagnose "__thread static". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<static%>"); break; case RID_TYPEDEF: n = csc_typedef; break; default: gcc_unreachable (); } if (n != csc_none && n == specs->storage_class) dupe = true; if (dupe) { if (i == RID_THREAD) error ("duplicate %<_Thread_local%> or %<__thread%>"); else error ("duplicate %qE", scspec); } if (n != csc_none) { if (specs->storage_class != csc_none && n != specs->storage_class) { error ("multiple storage classes in declaration specifiers"); } else { specs->storage_class = n; specs->locations[cdw_storage_class] = loc; if (n != csc_extern && n != csc_static && specs->thread_p) { error ("%qs used with %qE", specs->thread_gnu_p ?
"__thread" : "_Thread_local", scspec); specs->thread_p = false; } } } return specs; }

/* Add the attributes ATTRS to the declaration specifiers SPECS, returning SPECS.  */

struct c_declspecs * declspecs_add_attrs (location_t loc, struct c_declspecs *specs, tree attrs) { specs->attrs = chainon (attrs, specs->attrs); specs->locations[cdw_attributes] = loc; specs->declspecs_seen_p = true; /* In the case of standard attributes at the start of the declaration, the caller will reset this. */ specs->non_std_attrs_seen_p = true; return specs; }

/* Add an _Alignas specifier (expression ALIGN, or type whose alignment is ALIGN) to the declaration specifiers SPECS, returning SPECS.  */

struct c_declspecs * declspecs_add_alignas (location_t loc, struct c_declspecs *specs, tree align) { specs->alignas_p = true; specs->locations[cdw_alignas] = loc; if (align == error_mark_node) return specs; /* Only accept the alignment if it's valid and greater than the current one. Zero is invalid but by C11 required to be silently ignored. */ int align_log = check_user_alignment (align, false, /* warn_zero = */false); if (align_log > specs->align_log) specs->align_log = align_log; return specs; }

/* Combine "long", "short", "signed", "unsigned" and "_Complex" type specifiers with any other type specifier to determine the resulting type. This is where ISO C checks on complex types are made, since "_Complex long" is a prefix of the valid ISO C type "_Complex long double". Also apply postfix standard attributes to modify the type.  */

struct c_declspecs * finish_declspecs (struct c_declspecs *specs) { /* If a type was specified as a whole, we have no modifiers and are done. */ if (specs->type != NULL_TREE) { gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Set a dummy type.
*/ if (TREE_CODE (specs->type) == ERROR_MARK) specs->type = integer_type_node; goto handle_postfix_attrs; } /* If none of "void", "_Bool", "char", "int", "float" or "double" has been specified, treat it as "int" unless "_Complex" is present and there are no other specifiers. If we just have "_Complex", it is equivalent to "_Complex double", but e.g. "_Complex short" is equivalent to "_Complex short int". */ if (specs->typespec_word == cts_none) { if (specs->saturating_p) { error_at (specs->locations[cdw_saturating], "%<_Sat%> is used without %<_Fract%> or %<_Accum%>"); if (!targetm.fixed_point_supported_p ()) error_at (specs->locations[cdw_saturating], "fixed-point types not supported for this target"); specs->typespec_word = cts_fract; } else if (specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p) { specs->typespec_word = cts_int; } else if (specs->complex_p) { specs->typespec_word = cts_double; pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support plain %<complex%> meaning " "%<double complex%>"); } else { specs->typespec_word = cts_int; specs->default_int_p = true; /* We don't diagnose this here because grokdeclarator will give more specific diagnostics according to whether it is a function definition. */ } } /* If "signed" was specified, record this to distinguish "int" and "signed int" in the case of a bit-field with -funsigned-bitfields. */ specs->explicit_signed_p = specs->signed_p; /* Now compute the actual type. */ switch (specs->typespec_word) { case cts_auto_type: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Type to be filled in later. 
*/ if (specs->postfix_attrs) error ("%<__auto_type%> followed by %<[[]]%> attributes"); break; case cts_void: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = void_type_node; break; case cts_bool: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = boolean_type_node; break; case cts_char: gcc_assert (!specs->long_p && !specs->short_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->signed_p) specs->type = signed_char_type_node; else if (specs->unsigned_p) specs->type = unsigned_char_type_node; else specs->type = char_type_node; if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int_n: gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (! int_n_enabled_p[specs->int_n_idx]) specs->type = integer_type_node; else specs->type = (specs->unsigned_p ? int_n_trees[specs->int_n_idx].unsigned_type : int_n_trees[specs->int_n_idx].signed_type); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int: gcc_assert (!(specs->long_p && specs->short_p)); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->long_long_p) specs->type = (specs->unsigned_p ? long_long_unsigned_type_node : long_long_integer_type_node); else if (specs->long_p) specs->type = (specs->unsigned_p ? long_unsigned_type_node : long_integer_type_node); else if (specs->short_p) specs->type = (specs->unsigned_p ? short_unsigned_type_node : short_integer_type_node); else specs->type = (specs->unsigned_p ? 
unsigned_type_node : integer_type_node); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_float: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); specs->type = (specs->complex_p ? complex_float_type_node : float_type_node); break; case cts_double: gcc_assert (!specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); if (specs->long_p) { specs->type = (specs->complex_p ? complex_long_double_type_node : long_double_type_node); } else { specs->type = (specs->complex_p ? complex_double_type_node : double_type_node); } break; case cts_floatn_nx: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE) specs->type = integer_type_node; else if (specs->complex_p) specs->type = COMPLEX_FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx); else specs->type = FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx); break; case cts_dfloat32: case cts_dfloat64: case cts_dfloat128: gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); if (!targetm.decimal_float_supported_p ()) specs->type = integer_type_node; else if (specs->typespec_word == cts_dfloat32) specs->type = dfloat32_type_node; else if (specs->typespec_word == cts_dfloat64) specs->type = dfloat64_type_node; else specs->type = dfloat128_type_node; break; case cts_fract: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_fract_type_node : sat_long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? 
sat_unsigned_long_fract_type_node : sat_long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_fract_type_node : sat_short_fract_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_fract_type_node : sat_fract_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_fract_type_node : long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_fract_type_node : long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_fract_type_node : short_fract_type_node; else specs->type = specs->unsigned_p ? unsigned_fract_type_node : fract_type_node; } break; case cts_accum: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_accum_type_node : sat_long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_accum_type_node : sat_long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_accum_type_node : sat_short_accum_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_accum_type_node : sat_accum_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_accum_type_node : long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_accum_type_node : long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_accum_type_node : short_accum_type_node; else specs->type = specs->unsigned_p ? 
unsigned_accum_type_node : accum_type_node;
	}
      break;
    default:
      gcc_unreachable ();
    }

 handle_postfix_attrs:
  /* Apply any attributes that followed the type specifiers to the
     now-resolved type, after diagnosing ones unsuitable for types.  */
  if (specs->type != NULL)
    {
      specs->postfix_attrs = c_warn_type_attributes (specs->postfix_attrs);
      decl_attributes (&specs->type, specs->postfix_attrs, 0);
      specs->postfix_attrs = NULL_TREE;
    }
  return specs;
}

/* Perform final processing on one file scope's declarations (or the
   external scope's declarations), GLOBALS.  */

static void
c_write_global_declarations_1 (tree globals)
{
  tree decl;
  bool reconsider;

  /* Process the decls in the order they were written.  */
  for (decl = globals; decl; decl = DECL_CHAIN (decl))
    {
      /* Check for used but undefined static functions using the C
	 standard's definition of "used", and set TREE_NO_WARNING so
	 that check_global_declaration doesn't repeat the check.  */
      if (TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_INITIAL (decl) == NULL_TREE
	  && DECL_EXTERNAL (decl)
	  && !TREE_PUBLIC (decl))
	{
	  if (C_DECL_USED (decl))
	    {
	      /* Used but no definition was ever supplied.  */
	      if (pedwarn (input_location, 0, "%q+F used but never defined",
			   decl))
		TREE_NO_WARNING (decl) = 1;
	    }
	  /* For -Wunused-function warn about unused static prototypes.  */
	  else if (warn_unused_function
		   && ! DECL_ARTIFICIAL (decl)
		   && ! TREE_NO_WARNING (decl))
	    {
	      if (warning (OPT_Wunused_function,
			   "%q+F declared %<static%> but never defined",
			   decl))
		TREE_NO_WARNING (decl) = 1;
	    }
	}

      wrapup_global_declaration_1 (decl);
    }

  /* Repeat wrapup_global_declaration_2 until it makes no further
     progress: emitting one decl can make another needed.  */
  do
    {
      reconsider = false;
      for (decl = globals; decl; decl = DECL_CHAIN (decl))
	reconsider |= wrapup_global_declaration_2 (decl);
    }
  while (reconsider);
}

/* Preserve the external declarations scope across a garbage collect.  */
static GTY(()) tree ext_block;

/* Collect all references relevant to SOURCE_FILE.  */

static void
collect_all_refs (const char *source_file)
{
  tree t;
  unsigned i;

  /* Walk every translation unit's file-scope decls, then the decls of
     the preserved external scope.  */
  FOR_EACH_VEC_ELT (*all_translation_units, i, t)
    collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file);

  collect_ada_nodes (BLOCK_VARS (ext_block), source_file);
}

/* Collect source file references at global level.
*/

static void
collect_source_refs (void)
{
  tree t;
  tree decls;
  tree decl;
  unsigned i;

  /* Record the source file of every non-builtin file-scope decl in
     each translation unit, then in the external scope.  */
  FOR_EACH_VEC_ELT (*all_translation_units, i, t)
    {
      decls = DECL_INITIAL (t);
      for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl))
	if (!DECL_IS_BUILTIN (decl))
	  collect_source_ref (DECL_SOURCE_FILE (decl));
    }

  for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl))
    if (!DECL_IS_BUILTIN (decl))
      collect_source_ref (DECL_SOURCE_FILE (decl));
}

/* Perform any final parser cleanups and generate initial debugging
   information.  */

void
c_parse_final_cleanups (void)
{
  tree t;
  unsigned i;

  /* We don't want to do this if generating a PCH.  */
  if (pch_file)
    return;

  timevar_stop (TV_PHASE_PARSING);
  timevar_start (TV_PHASE_DEFERRED);

  /* Do the Objective-C stuff.  This is where all the Objective-C
     module stuff gets generated (symtab, class/protocol/selector
     lists etc).  */
  if (c_dialect_objc ())
    objc_write_global_declarations ();

  /* Close the external scope.  Its decls survive garbage collection
     via the GTY-marked ext_block.  */
  ext_block = pop_scope ();
  external_scope = 0;
  gcc_assert (!current_scope);

  /* Handle -fdump-ada-spec[-slim]. */
  if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
    {
      /* Build a table of files to generate specs for */
      collect_source_ref (main_input_filename);
      if (!flag_dump_ada_spec_slim)
	collect_source_refs ();
      dump_ada_specs (collect_all_refs, NULL);
    }

  /* Process all file scopes in this compilation, and the
     external_scope, through wrapup_global_declarations.  */
  FOR_EACH_VEC_ELT (*all_translation_units, i, t)
    c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t)));
  c_write_global_declarations_1 (BLOCK_VARS (ext_block));

  timevar_stop (TV_PHASE_DEFERRED);
  timevar_start (TV_PHASE_PARSING);

  ext_block = NULL;
}

/* Register reserved keyword WORD as qualifier for address space AS.  */

void
c_register_addr_space (const char *word, addr_space_t as)
{
  int rid = RID_FIRST_ADDR_SPACE + as;
  tree id;

  /* Address space qualifiers are only supported in C with GNU extensions enabled.
*/
  if (c_dialect_objc () || flag_no_asm)
    return;

  /* Reserve the keyword by giving its identifier the RID code for
     this address space.  */
  id = get_identifier (word);
  C_SET_RID_CODE (id, rid);
  C_IS_RESERVED_WORD (id) = 1;
  ridpointers [rid] = id;
}

/* Return identifier to look up for omp declare reduction.  */

tree
c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id)
{
  const char *p = NULL;

  /* Built-in reduction operators map to fixed spellings; any other
     code falls through and uses REDUCTION_ID's own spelling.  */
  switch (reduction_code)
    {
    case PLUS_EXPR: p = "+"; break;
    case MULT_EXPR: p = "*"; break;
    case MINUS_EXPR: p = "-"; break;
    case BIT_AND_EXPR: p = "&"; break;
    case BIT_XOR_EXPR: p = "^"; break;
    case BIT_IOR_EXPR: p = "|"; break;
    case TRUTH_ANDIF_EXPR: p = "&&"; break;
    case TRUTH_ORIF_EXPR: p = "||"; break;
    case MIN_EXPR: p = "min"; break;
    case MAX_EXPR: p = "max"; break;
    default:
      break;
    }

  if (p == NULL)
    {
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      p = IDENTIFIER_POINTER (reduction_id);
    }

  /* Build "omp declare reduction <p>" on the stack; the space in the
     prefix makes the name unspellable in user code.  */
  const char prefix[] = "omp declare reduction ";
  size_t lenp = sizeof (prefix);
  size_t len = strlen (p);
  char *name = XALLOCAVEC (char, lenp + len);
  memcpy (name, prefix, lenp - 1);
  memcpy (name + lenp - 1, p, len + 1);
  return get_identifier (name);
}

/* Lookup REDUCTION_ID in the current scope, or create an artificial
   VAR_DECL, bind it into the current scope and return it.  */

tree
c_omp_reduction_decl (tree reduction_id)
{
  struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
  if (b != NULL && B_IN_CURRENT_SCOPE (b))
    return b->decl;

  /* No binding yet in this scope: create a placeholder VAR_DECL that
     carries the reduction info and bind it here.  */
  tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL,
			  reduction_id, integer_type_node);
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_STATIC (decl) = 1;
  TREE_PUBLIC (decl) = 0;
  bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION);
  return decl;
}

/* Lookup REDUCTION_ID in the first scope where it has entry for TYPE.
*/

tree
c_omp_reduction_lookup (tree reduction_id, tree type)
{
  struct c_binding *b = I_SYMBOL_BINDING (reduction_id);

  /* Walk the binding chain from innermost scope outward; each decl's
     DECL_INITIAL is a TREE_LIST of (type, reduction) pairs.  */
  while (b)
    {
      tree t;
      for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t))
	if (comptypes (TREE_PURPOSE (t), type))
	  return TREE_VALUE (t);
      b = b->shadowed;
    }
  return error_mark_node;
}

/* Helper function called via walk_tree, to diagnose invalid
   #pragma omp declare reduction combiners or initializers.  DATA
   points to the two variables allowed in the expression (omp_out/
   omp_in for combiners, omp_priv/omp_orig for initializers).  */

tree
c_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  tree *vars = (tree *) data;
  if (SSA_VAR_P (*tp)
      && !DECL_ARTIFICIAL (*tp)
      && *tp != vars[0]
      && *tp != vars[1])
    {
      location_t loc = DECL_SOURCE_LOCATION (vars[0]);
      /* vars[0]'s name tells us whether we are checking a combiner
	 (omp_out) or an initializer.  */
      if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0)
	error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
		       "variable %qD which is not %<omp_out%> nor %<omp_in%>",
		  *tp);
      else
	error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
		       "to variable %qD which is not %<omp_priv%> nor "
		       "%<omp_orig%>", *tp);
      return *tp;
    }
  return NULL_TREE;
}

/* Return true if DECL's name has a binding in the current scope.  */

bool
c_check_in_current_scope (tree decl)
{
  struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (decl));
  return b != NULL && B_IN_CURRENT_SCOPE (b);
}

#include "gt-c-c-decl.h"
GB_iso_expand.c
//------------------------------------------------------------------------------ // GB_iso_expand: expand a scalar into an entire array //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" void GB_iso_expand // expand an iso scalar into an entire array ( void *restrict X, // output array to expand into int64_t n, // # of entries in X void *restrict scalar, // scalar to expand into X size_t size, // size of the scalar and each entry of X GB_Context Context ) { //-------------------------------------------------------------------------- // determine how many threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (n, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // copy the value into X //-------------------------------------------------------------------------- int64_t p ; switch (size) { case GB_1BYTE : // bool, uint8, int8, and UDT of size 1 { uint8_t a0 = (*((uint8_t *) scalar)) ; uint8_t *restrict Z = (uint8_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_2BYTE : // uint16, int16, and UDT of size 2 { uint16_t a0 = (*((uint16_t *) scalar)) ; uint16_t *restrict Z = (uint16_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_4BYTE : // uint32, int32, float, and UDT of size 4 { uint32_t a0 = (*((uint32_t *) scalar)) ; uint32_t *restrict Z = (uint32_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_8BYTE : // 
uint64, int64, double, float complex, UDT size 8 { uint64_t a0 = (*((uint64_t *) scalar)) ; uint64_t *restrict Z = (uint64_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_16BYTE : // double complex, and UDT size 16 { GB_blob16 a0 = (*((GB_blob16 *) scalar)) ; GB_blob16 *restrict Z = (GB_blob16 *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; default : // user-defined types of arbitrary size { GB_void *restrict Z = (GB_void *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { memcpy (Z + p*size, scalar, size) ; } } break ; } }
if-4.c
/* Negative DejaGnu test: every OpenMP directive below combines 'if'
   clauses in a way the compiler must reject -- two modifier-less 'if'
   clauses, two 'if' clauses with the same directive-name modifier, or
   a mix of modified and unmodified 'if' clauses.  Each dg-error
   comment names the exact diagnostic expected on its line; do not
   move a directive to a different line from its annotation.  */

void f0 (void);

void
f1 (int *p)
{
  int i;

  /* 'task' construct.  */
  #pragma omp task if (0) if (0)	/* { dg-error "too many 'if' clauses without modifier" } */
    f0 ();
  #pragma omp task if (0) if (1)	/* { dg-error "too many 'if' clauses without modifier" } */
    f0 ();
  #pragma omp task if (task:0) if (task:0)	/* { dg-error "too many 'if' clauses with 'task' modifier" } */
    f0 ();
  #pragma omp task if (task:0) if (1)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
    f0 ();
  #pragma omp task if (0) if (task:1)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
    f0 ();

  /* 'taskloop' construct.  */
  #pragma omp taskloop if (0) if (0)	/* { dg-error "too many 'if' clauses without modifier" } */
  for (i = 0; i < 8; ++i)
    f0 ();
  #pragma omp taskloop if (0) if (1)	/* { dg-error "too many 'if' clauses without modifier" } */
  for (i = 0; i < 8; ++i)
    f0 ();
  #pragma omp taskloop if (taskloop:0) if (taskloop:0)	/* { dg-error "too many 'if' clauses with 'taskloop' modifier" } */
  for (i = 0; i < 8; ++i)
    f0 ();
  #pragma omp taskloop if (taskloop:0) if (1)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
  for (i = 0; i < 8; ++i)
    f0 ();
  #pragma omp taskloop if (0) if (taskloop:0)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
  for (i = 0; i < 8; ++i)
    f0 ();

  /* 'target data' construct.  */
  #pragma omp target data if (1) if (1) map (alloc: i)	/* { dg-error "too many 'if' clauses without modifier" } */
    f0 ();
  #pragma omp target data if (target data: 1) if (target data:0) map (alloc: i)	/* { dg-error "too many 'if' clauses with 'target data' modifier" } */
    f0 ();
  #pragma omp target data if (1) if (target data:0) map (alloc: i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
    f0 ();
  #pragma omp target data if (target data: 1) if (0) map (alloc: i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
    f0 ();

  /* 'target enter data' (stand-alone directive).  */
  #pragma omp target enter data if (1) if (1) map (to: i)	/* { dg-error "too many 'if' clauses without modifier" } */
  #pragma omp target enter data if (target enter data: 1) if (target enter data:0) map (to: i)	/* { dg-error "too many 'if' clauses with 'target enter data' modifier" } */
  #pragma omp target enter data if (1) if (target enter data:0) map (to: i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
  #pragma omp target enter data if (target enter data: 1) if (0) map (to: i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */

  /* 'target exit data' (stand-alone directive).  */
  #pragma omp target exit data if (1) if (1) map (from: i)	/* { dg-error "too many 'if' clauses without modifier" } */
  #pragma omp target exit data if (target exit data: 1) if (target exit data:0) map (from: i)	/* { dg-error "too many 'if' clauses with 'target exit data' modifier" } */
  #pragma omp target exit data if (1) if (target exit data:0) map (from: i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
  #pragma omp target exit data if (target exit data: 1) if (0) map (from: i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */

  /* 'target' construct.  */
  #pragma omp target if (1) if (1)	/* { dg-error "too many 'if' clauses without modifier" } */
    f0 ();
  #pragma omp target if (target: 1) if (target:0)	/* { dg-error "too many 'if' clauses with 'target' modifier" } */
    f0 ();
  #pragma omp target if (1) if (target:0)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
    f0 ();
  #pragma omp target if (target: 1) if (0)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
    f0 ();

  /* 'target update' (stand-alone directive).  */
  #pragma omp target update if (1) if (1) to (i)	/* { dg-error "too many 'if' clauses without modifier" } */
  #pragma omp target update if (target update: 1) if (target update:0) to (i)	/* { dg-error "too many 'if' clauses with 'target update' modifier" } */
  #pragma omp target update if (1) if (target update:0) to (i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
  #pragma omp target update if (target update: 1) if (0) to (i)	/* { dg-error "if any 'if' clause has modifier, then all 'if' clauses have to use modifier" } */
}
fig4.98-threadprivate.c
/* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. Copyright 2009 Sun Microsystems, Inc. All rights reserved. The contents of this file are subject to the terms of the BSD License("BSD")(the "License"). You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt The BSD License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. You acknowledge that this software is not designed, licensed or intended for use in the design, construction, operation or maintenance of any nuclear facility. 
*/

/* OpenMP 'threadprivate' example (Fig. 4.98): each thread in the
   parallel region gets its own private copy of the global pointer
   pglobal, so concurrent iterations can malloc/fill/free their own
   buffers while calculate_sum() still reads the buffer through the
   global name.  */

#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENMP
   #include <omp.h>
   #define TRUE 1
   #define FALSE 0
#else
   /* Serial fallback stubs when compiled without OpenMP.  */
   #define omp_get_thread_num() 0
   #define omp_get_num_threads() 1
#endif

int calculate_sum(int length);

/* Global work buffer; threadprivate gives every thread its own copy.  */
int *pglobal;
#pragma omp threadprivate(pglobal)

int main()
{
   int i, j, sum, TID, n = 5;
   int length[n], check[n];

#ifdef _OPENMP
   /* Pin the thread count: disable dynamic adjustment, then ask for 3.  */
   (void) omp_set_dynamic(FALSE);
   if (omp_get_dynamic())
      {printf("Warning: dynamic adjustment of threads has been set\n");}
   (void) omp_set_num_threads(3);
#endif

   for (i=0; i<n; i++)
   {
      length[i] = 10 * (i+1);
      /* check[i] = 1 + 2 + ... + length[i], for verifying the result.  */
      check[i] = length[i]*(length[i]+1)/2;
   }

   #pragma omp parallel for shared(n,length,check) private(TID,i,j,sum)
   for (i=0; i<n; i++)
   {
      TID = omp_get_thread_num();

      if ( (pglobal = (int *) malloc(length[i]*sizeof(int))) != NULL ) {
         /* Fill this thread's private buffer with 1..length[i].  */
         for (j=sum=0; j<length[i]; j++)
             pglobal[j] = j+1;

         sum = calculate_sum(length[i]);

         printf("TID %d: value of sum for i = %d is %8d (check = %8d)\n",
                 TID,i,sum,check[i]);

         free(pglobal);
      } else {
         printf("TID %d: fatal error in malloc for length[%d] = %d\n",
                 TID,i,length[i]);
      }

   } /*-- End of parallel for --*/

   return(0);
}

/* Sum the first LENGTH entries of the calling thread's pglobal buffer.  */
int calculate_sum(int length)
{
   int sum = 0;

   for (int j=0; j<length; j++)
       sum += pglobal[j];

   return(sum);
}
glove_cython.c
/* Generated by Cython 0.29.19 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "extra_compile_args": [ "-fopenmp", "-ffast-math", "-march=native" ], "extra_link_args": [ "-fopenmp" ], "name": "glove.glove_cython", "sources": [ "glove/glove_cython.pyx" ] }, "module_name": "glove.glove_cython" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_19" #define CYTHON_HEX_VERSION 0x001D13F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define 
CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define 
CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if 
CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) 
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define 
Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 
&& !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && 
CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__glove__glove_cython #define __PYX_HAVE_API__glove__glove_cython /* Early includes */ 
#include "math.h" #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define 
__Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define 
__Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; 
PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "glove/glove_cython.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include 
<pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define 
IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* 
"View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void 
(*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) 
Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( 
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) 
__Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define 
Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, 
boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject 
*__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) 
/* NOTE(review): Cython-generated utility-code prototypes and helper macros.
   Do not hand-edit; regenerate from glove/glove_cython.pyx with cython. */
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
/* Cached module-global lookup: while the module dict's version tag is
   unchanged, reuse the cached object instead of re-doing the dict lookup. */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
/* Fast-path exception save/reset using a cached thread state where available. */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
/* Fast append for list comprehensions: writes into preallocated list slots
   when capacity allows; otherwise falls back to PyList_Append. */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
    (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject* none = _PyList_Extend((PyListObject*)L, v);
    if (unlikely(!none))
        return -1;
    Py_DECREF(none);
    return 0;
#else
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
/* Like __Pyx_ListComp_Append but only takes the fast path when the list is
   more than half full (the extra `len > allocated>>1` check). */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
/* Cache mapping C line numbers to PyCodeObjects for traceback generation. */
typedef struct {
    PyCodeObject* code_object;
    int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
    int count;
    int max_count;
    __Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
  Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
  size_t refcount;
  Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
  __Pyx_Buffer *rcbuffer;
  char *data;
  __Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Method-level helpers for the View.MemoryView utility classes. */
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'glove.glove_cython' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static CYTHON_INLINE double __pyx_f_5glove_12glove_cython_double_min(double, double); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
/* NOTE(review): Cython-generated forward declarations, buffer type descriptors
   and interned-string tables; regenerate from glove/glove_cython.pyx rather
   than editing this file directly. */
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
/* Buffer-format type descriptors for the element types this module exposes
   through memoryviews ('R' = real/float kind; 'U'/'I' = unsigned/signed int). */
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
#define __Pyx_MODULE_NAME "glove.glove_cython"
extern int __pyx_module_is_main_glove__glove_cython;
int __pyx_module_is_main_glove__glove_cython = 0;
/* Implementation of 'glove.glove_cython' */
/* Cached references to the builtins used by the module. */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
/* C literals for every interned name/string used by the module; each has a
   matching PyObject* declared below (presumably interned via __Pyx_InitStrings
   at module init -- standard Cython pattern). */
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_sp[] = "sp";
static const char __pyx_k__19[] = "*";
static const char __pyx_k_col[] = "col";
static const char __pyx_k_dim[] = "dim";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_row[] = "row";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_loss[] = "loss";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_alpha[] = "alpha";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_count[] = "count";
static const char __pyx_k_epoch[] = "epoch";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_counts[] = "counts";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_epochs[] = "epochs";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_word_a[] = "word_a";
static const char __pyx_k_word_b[] = "word_b";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_wordvec[] = "wordvec";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_gradient[] = "gradient";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_max_loss[] = "max_loss";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_wordbias[] = "wordbias";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_max_count[] = "max_count";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_no_threads[] = "no_threads";
static const char __pyx_k_prediction[] = "prediction";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_collections[] = "collections";
static const char __pyx_k_fit_vectors[] = "fit_vectors";
static const char __pyx_k_entry_weight[] = "entry_weight";
static const char __pyx_k_paragraphvec[] = "paragraphvec";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_scipy_sparse[] = "scipy.sparse";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_learning_rate[] = "learning_rate";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_shuffle_index[] = "shuffle_index";
static const char __pyx_k_sum_gradients[] = "sum_gradients";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_shuffle_indices[] = "shuffle_indices";
static const char __pyx_k_no_cooccurrences[] = "no_cooccurrences";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_glove_glove_cython[] = "glove.glove_cython";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_transform_paragraph[] = "transform_paragraph";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_initial_learning_rate[] = "initial_learning_rate";
static const char __pyx_k_wordvec_sum_gradients[] = "wordvec_sum_gradients";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_glove_glove_cython_pyx[] = "glove/glove_cython.pyx";
static const char __pyx_k_wordbias_sum_gradients[] = "wordbias_sum_gradients";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
/* Interned PyObject counterparts of the __pyx_k_* literals above
   (n_s = str name, kp_s = str constant, n_b/n_u = bytes/unicode). */
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s__19;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_alpha;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_col;
static PyObject *__pyx_n_s_collections;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_count;
static PyObject *__pyx_n_s_counts;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dim;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_entry_weight;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_epoch;
static PyObject *__pyx_n_s_epochs;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_fit_vectors;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_n_s_glove_glove_cython;
static PyObject *__pyx_kp_s_glove_glove_cython_pyx;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_gradient;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_initial_learning_rate;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_learning_rate;
static PyObject *__pyx_n_s_loss;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_max_count;
static PyObject *__pyx_n_s_max_loss;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_n_s_no_cooccurrences;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_no_threads;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_paragraphvec;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_prediction;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_row; static PyObject *__pyx_n_s_scipy_sparse; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_shuffle_index; static PyObject *__pyx_n_s_shuffle_indices; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_sp; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_sum_gradients; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_transform_paragraph; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_word_a; static PyObject *__pyx_n_s_word_b; static PyObject *__pyx_n_s_wordbias; static PyObject *__pyx_n_s_wordbias_sum_gradients; static PyObject *__pyx_n_s_wordvec; static PyObject *__pyx_n_s_wordvec_sum_gradients; static PyObject *__pyx_pf_5glove_12glove_cython_fit_vectors(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_sum_gradients, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_wordbias_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_col, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, double __pyx_v_max_loss, CYTHON_UNUSED int __pyx_v_no_threads); /* proto */ 
static PyObject *__pyx_pf_5glove_12glove_cython_2transform_paragraph(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_paragraphvec, __Pyx_memviewslice __pyx_v_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, int __pyx_v_epochs); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj 
*__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject 
*__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__27; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__29; static PyObject *__pyx_codeobj__21; static PyObject *__pyx_codeobj__23; static PyObject *__pyx_codeobj__30; /* Late includes */ /* NOTE(review): this file is machine-generated by Cython from glove/glove_cython.pyx — make changes in the .pyx source and regenerate; do not hand-edit. */ /* "glove/glove_cython.pyx":10 * * * cdef inline double double_min(double a, double b) nogil: return a if a <= b else b # <<<<<<<<<<<<<< * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b * cdef inline int int_max(int a, int b) nogil: return a if a > b else b */ /* double_min: C rendering of the nogil cdef above — returns the smaller of a and b. */ static CYTHON_INLINE double __pyx_f_5glove_12glove_cython_double_min(double __pyx_v_a, double __pyx_v_b) { double __pyx_r; double __pyx_t_1; if (((__pyx_v_a <= __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/glove_cython.pyx":11 * * cdef inline double double_min(double a, double b) nogil: return a if a <= b else b * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b # <<<<<<<<<<<<<< * cdef inline int int_max(int a, int b) nogil: return a if a > b else b * */ /* int_min: returns the smaller of the two ints. */ static CYTHON_INLINE int __pyx_f_5glove_12glove_cython_int_min(int __pyx_v_a, int __pyx_v_b) { int __pyx_r; int __pyx_t_1; if (((__pyx_v_a <= __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/glove_cython.pyx":12 * cdef inline double double_min(double a, double b) nogil: return a if a <= b else b * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b * cdef inline int int_max(int a, int b) nogil: return a if a > b else b # 
<<<<<<<<<<<<<< * * */ /* int_max: returns the larger of the two ints (Cython-generated; edit glove/glove_cython.pyx instead of this file). */ static CYTHON_INLINE int __pyx_f_5glove_12glove_cython_int_max(int __pyx_v_a, int __pyx_v_b) { int __pyx_r; int __pyx_t_1; if (((__pyx_v_a > __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ /* Python wrapper */ /* fit_vectors: Python-visible entry point; the wrapper parses positional/keyword args into typed memoryview slices before calling the implementation. */ static PyObject *__pyx_pw_5glove_12glove_cython_1fit_vectors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5glove_12glove_cython_fit_vectors[] = "\n    Estimate GloVe word embeddings given the cooccurrence matrix.\n    Modifies the word vector and word bias array in-place.\n\n    Training is performed via asynchronous stochastic gradient descent,\n    using the AdaGrad per-coordinate learning rate.\n    "; static PyMethodDef __pyx_mdef_5glove_12glove_cython_1fit_vectors = {"fit_vectors", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5glove_12glove_cython_1fit_vectors, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5glove_12glove_cython_fit_vectors}; static PyObject *__pyx_pw_5glove_12glove_cython_1fit_vectors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_wordvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordvec_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordbias = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordbias_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_row = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_col = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_counts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_shuffle_indices = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_initial_learning_rate; double __pyx_v_max_count; double __pyx_v_alpha; 
double __pyx_v_max_loss; CYTHON_UNUSED int __pyx_v_no_threads; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fit_vectors (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wordvec,&__pyx_n_s_wordvec_sum_gradients,&__pyx_n_s_wordbias,&__pyx_n_s_wordbias_sum_gradients,&__pyx_n_s_row,&__pyx_n_s_col,&__pyx_n_s_counts,&__pyx_n_s_shuffle_indices,&__pyx_n_s_initial_learning_rate,&__pyx_n_s_max_count,&__pyx_n_s_alpha,&__pyx_n_s_max_loss,&__pyx_n_s_no_threads,0}; PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); CYTHON_FALLTHROUGH; case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); CYTHON_FALLTHROUGH; case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); CYTHON_FALLTHROUGH; case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; 
case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec_sum_gradients)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 1); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordbias)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 2); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordbias_sum_gradients)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 3); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_row)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 4); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_col)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 5); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_counts)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 6); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shuffle_indices)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 7); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_initial_learning_rate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 8); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 9: if (likely((values[9] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_count)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 
1, 13, 13, 9); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 10: if (likely((values[10] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 10); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 11: if (likely((values[11] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_loss)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 11); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 12: if (likely((values[12] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_no_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 12); __PYX_ERR(0, 20, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fit_vectors") < 0)) __PYX_ERR(0, 20, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 13) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); } __pyx_v_wordvec = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec.memview)) __PYX_ERR(0, 20, __pyx_L3_error) __pyx_v_wordvec_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec_sum_gradients.memview)) __PYX_ERR(0, 21, __pyx_L3_error) __pyx_v_wordbias = 
__Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordbias.memview)) __PYX_ERR(0, 22, __pyx_L3_error) __pyx_v_wordbias_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordbias_sum_gradients.memview)) __PYX_ERR(0, 23, __pyx_L3_error) __pyx_v_row = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_row.memview)) __PYX_ERR(0, 24, __pyx_L3_error) __pyx_v_col = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[5], PyBUF_WRITABLE); if (unlikely(!__pyx_v_col.memview)) __PYX_ERR(0, 25, __pyx_L3_error) __pyx_v_counts = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[6], PyBUF_WRITABLE); if (unlikely(!__pyx_v_counts.memview)) __PYX_ERR(0, 26, __pyx_L3_error) __pyx_v_shuffle_indices = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[7], PyBUF_WRITABLE); if (unlikely(!__pyx_v_shuffle_indices.memview)) __PYX_ERR(0, 27, __pyx_L3_error) __pyx_v_initial_learning_rate = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_initial_learning_rate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 28, __pyx_L3_error) __pyx_v_max_count = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_max_count == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L3_error) __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L3_error) __pyx_v_max_loss = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_max_loss == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L3_error) __pyx_v_no_threads = __Pyx_PyInt_As_int(values[12]); if (unlikely((__pyx_v_no_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 20, __pyx_L3_error) __pyx_L3_error:; 
__Pyx_AddTraceback("glove.glove_cython.fit_vectors", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5glove_12glove_cython_fit_vectors(__pyx_self, __pyx_v_wordvec, __pyx_v_wordvec_sum_gradients, __pyx_v_wordbias, __pyx_v_wordbias_sum_gradients, __pyx_v_row, __pyx_v_col, __pyx_v_counts, __pyx_v_shuffle_indices, __pyx_v_initial_learning_rate, __pyx_v_max_count, __pyx_v_alpha, __pyx_v_max_loss, __pyx_v_no_threads); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5glove_12glove_cython_fit_vectors(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_sum_gradients, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_wordbias_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_col, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, double __pyx_v_max_loss, CYTHON_UNUSED int __pyx_v_no_threads) { int __pyx_v_dim; CYTHON_UNUSED int __pyx_v_no_cooccurrences; int __pyx_v_word_a; int __pyx_v_word_b; double __pyx_v_count; double __pyx_v_learning_rate; double __pyx_v_gradient; double __pyx_v_prediction; double __pyx_v_entry_weight; double __pyx_v_loss; int __pyx_v_i; int __pyx_v_j; int __pyx_v_shuffle_index; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; __Pyx_RefNannySetupContext("fit_vectors", 0); /* "glove/glove_cython.pyx":43 * # Get number of latent 
dimensions and * # number of cooccurrences. * cdef int dim = wordvec.shape[1] # <<<<<<<<<<<<<< * cdef int no_cooccurrences = row.shape[0] * */ __pyx_v_dim = (__pyx_v_wordvec.shape[1]); /* "glove/glove_cython.pyx":44 * # number of cooccurrences. * cdef int dim = wordvec.shape[1] * cdef int no_cooccurrences = row.shape[0] # <<<<<<<<<<<<<< * * # Hold indices of current words and */ __pyx_v_no_cooccurrences = (__pyx_v_row.shape[0]); /* "glove/glove_cython.pyx":59 * # We iterate over random indices to simulate * # shuffling the cooccurrence matrix. * with nogil: # <<<<<<<<<<<<<< * for j in prange(no_cooccurrences, num_threads=no_threads, * schedule='dynamic'): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "glove/glove_cython.pyx":60 * # shuffling the cooccurrence matrix. * with nogil: * for j in prange(no_cooccurrences, num_threads=no_threads, # <<<<<<<<<<<<<< * schedule='dynamic'): * shuffle_index = shuffle_indices[j] */ __pyx_t_1 = __pyx_v_no_cooccurrences; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(__pyx_v_no_threads) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_count) lastprivate(__pyx_v_entry_weight) lastprivate(__pyx_v_gradient) lastprivate(__pyx_v_i) firstprivate(__pyx_v_j) lastprivate(__pyx_v_j) lastprivate(__pyx_v_learning_rate) lastprivate(__pyx_v_loss) lastprivate(__pyx_v_prediction) lastprivate(__pyx_v_shuffle_index) lastprivate(__pyx_v_word_a) 
lastprivate(__pyx_v_word_b) schedule(dynamic) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_j = (int)(0 + 1 * __pyx_t_2); /* Initialize private variables to invalid values */ __pyx_v_count = ((double)__PYX_NAN()); __pyx_v_entry_weight = ((double)__PYX_NAN()); __pyx_v_gradient = ((double)__PYX_NAN()); __pyx_v_i = ((int)0xbad0bad0); __pyx_v_learning_rate = ((double)__PYX_NAN()); __pyx_v_loss = ((double)__PYX_NAN()); __pyx_v_prediction = ((double)__PYX_NAN()); __pyx_v_shuffle_index = ((int)0xbad0bad0); __pyx_v_word_a = ((int)0xbad0bad0); __pyx_v_word_b = ((int)0xbad0bad0); /* "glove/glove_cython.pyx":62 * for j in prange(no_cooccurrences, num_threads=no_threads, * schedule='dynamic'): * shuffle_index = shuffle_indices[j] # <<<<<<<<<<<<<< * word_a = row[shuffle_index] * word_b = col[shuffle_index] */ __pyx_t_4 = __pyx_v_j; __pyx_v_shuffle_index = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_shuffle_indices.data) + __pyx_t_4)) ))); /* "glove/glove_cython.pyx":63 * schedule='dynamic'): * shuffle_index = shuffle_indices[j] * word_a = row[shuffle_index] # <<<<<<<<<<<<<< * word_b = col[shuffle_index] * count = counts[shuffle_index] */ __pyx_t_4 = __pyx_v_shuffle_index; __pyx_v_word_a = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_row.data) + __pyx_t_4)) ))); /* "glove/glove_cython.pyx":64 * shuffle_index = shuffle_indices[j] * word_a = row[shuffle_index] * word_b = col[shuffle_index] # <<<<<<<<<<<<<< * count = counts[shuffle_index] * */ __pyx_t_4 = __pyx_v_shuffle_index; __pyx_v_word_b = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_col.data) + __pyx_t_4)) ))); /* "glove/glove_cython.pyx":65 * word_a = row[shuffle_index] * word_b = col[shuffle_index] * count = counts[shuffle_index] # <<<<<<<<<<<<<< * * # Get prediction */ __pyx_t_4 = __pyx_v_shuffle_index; __pyx_v_count = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_counts.data) + __pyx_t_4)) ))); /* "glove/glove_cython.pyx":68 * * # Get 
prediction * prediction = 0.0 # <<<<<<<<<<<<<< * * for i in range(dim): */ __pyx_v_prediction = 0.0; /* "glove/glove_cython.pyx":70 * prediction = 0.0 * * for i in range(dim): # <<<<<<<<<<<<<< * prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i] * */ __pyx_t_5 = __pyx_v_dim; __pyx_t_6 = __pyx_t_5; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "glove/glove_cython.pyx":71 * * for i in range(dim): * prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i] # <<<<<<<<<<<<<< * * prediction = prediction + wordbias[word_a] + wordbias[word_b] */ __pyx_t_4 = __pyx_v_word_a; __pyx_t_8 = __pyx_v_i; __pyx_t_9 = __pyx_v_word_b; __pyx_t_10 = __pyx_v_i; __pyx_v_prediction = (__pyx_v_prediction + ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_4 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_8)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_9 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_10)) ))))); } /* "glove/glove_cython.pyx":73 * prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i] * * prediction = prediction + wordbias[word_a] + wordbias[word_b] # <<<<<<<<<<<<<< * * # Compute loss and the example weight. */ __pyx_t_10 = __pyx_v_word_a; __pyx_t_9 = __pyx_v_word_b; __pyx_v_prediction = ((__pyx_v_prediction + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_10)) )))) + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_9)) )))); /* "glove/glove_cython.pyx":76 * * # Compute loss and the example weight. * entry_weight = double_min(1.0, (count / max_count)) ** alpha # <<<<<<<<<<<<<< * loss = entry_weight * (prediction - c_log(count)) * */ __pyx_v_entry_weight = pow(__pyx_f_5glove_12glove_cython_double_min(1.0, (__pyx_v_count / __pyx_v_max_count)), __pyx_v_alpha); /* "glove/glove_cython.pyx":77 * # Compute loss and the example weight. 
* entry_weight = double_min(1.0, (count / max_count)) ** alpha * loss = entry_weight * (prediction - c_log(count)) # <<<<<<<<<<<<<< * * # Clip the loss for numerical stability. */ __pyx_v_loss = (__pyx_v_entry_weight * (__pyx_v_prediction - log(__pyx_v_count))); /* "glove/glove_cython.pyx":80 * * # Clip the loss for numerical stability. * if loss < -max_loss: # <<<<<<<<<<<<<< * loss = -max_loss * elif loss > max_loss: */ __pyx_t_11 = ((__pyx_v_loss < (-__pyx_v_max_loss)) != 0); if (__pyx_t_11) { /* "glove/glove_cython.pyx":81 * # Clip the loss for numerical stability. * if loss < -max_loss: * loss = -max_loss # <<<<<<<<<<<<<< * elif loss > max_loss: * loss = max_loss */ __pyx_v_loss = (-__pyx_v_max_loss); /* "glove/glove_cython.pyx":80 * * # Clip the loss for numerical stability. * if loss < -max_loss: # <<<<<<<<<<<<<< * loss = -max_loss * elif loss > max_loss: */ goto __pyx_L12; } /* "glove/glove_cython.pyx":82 * if loss < -max_loss: * loss = -max_loss * elif loss > max_loss: # <<<<<<<<<<<<<< * loss = max_loss * */ __pyx_t_11 = ((__pyx_v_loss > __pyx_v_max_loss) != 0); if (__pyx_t_11) { /* "glove/glove_cython.pyx":83 * loss = -max_loss * elif loss > max_loss: * loss = max_loss # <<<<<<<<<<<<<< * * # Update step: apply gradients and reproject */ __pyx_v_loss = __pyx_v_max_loss; /* "glove/glove_cython.pyx":82 * if loss < -max_loss: * loss = -max_loss * elif loss > max_loss: # <<<<<<<<<<<<<< * loss = max_loss * */ } __pyx_L12:; /* "glove/glove_cython.pyx":87 * # Update step: apply gradients and reproject * # onto the unit sphere. 
* for i in range(dim): # <<<<<<<<<<<<<< * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) */ __pyx_t_5 = __pyx_v_dim; __pyx_t_6 = __pyx_t_5; for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) { __pyx_v_i = __pyx_t_7; /* "glove/glove_cython.pyx":89 * for i in range(dim): * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) # <<<<<<<<<<<<<< * gradient = loss * wordvec[word_b, i] * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate */ __pyx_t_9 = __pyx_v_word_a; __pyx_t_10 = __pyx_v_i; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_9 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_10)) ))))); /* "glove/glove_cython.pyx":90 * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) * gradient = loss * wordvec[word_b, i] # <<<<<<<<<<<<<< * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate * * gradient) */ __pyx_t_10 = __pyx_v_word_b; __pyx_t_9 = __pyx_v_i; __pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_10 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_9)) )))); /* "glove/glove_cython.pyx":91 * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) * gradient = loss * wordvec[word_b, i] * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate # <<<<<<<<<<<<<< * * gradient) * wordvec_sum_gradients[word_a, i] += gradient ** 2 */ __pyx_t_9 = __pyx_v_word_a; __pyx_t_10 = __pyx_v_i; /* "glove/glove_cython.pyx":92 * gradient = loss * wordvec[word_b, i] * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate * * gradient) # <<<<<<<<<<<<<< * wordvec_sum_gradients[word_a, i] += gradient ** 2 * */ __pyx_t_8 = __pyx_v_word_a; __pyx_t_4 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + 
__pyx_t_8 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_4)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_9 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_10)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient)); /* "glove/glove_cython.pyx":93 * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate * * gradient) * wordvec_sum_gradients[word_a, i] += gradient ** 2 # <<<<<<<<<<<<<< * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) */ __pyx_t_10 = __pyx_v_word_a; __pyx_t_9 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_10 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_9)) )) += pow(__pyx_v_gradient, 2.0); /* "glove/glove_cython.pyx":95 * wordvec_sum_gradients[word_a, i] += gradient ** 2 * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) # <<<<<<<<<<<<<< * gradient = loss * wordvec[word_a, i] * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate */ __pyx_t_12 = __pyx_v_word_b; __pyx_t_13 = __pyx_v_i; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_12 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_13)) ))))); /* "glove/glove_cython.pyx":96 * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) * gradient = loss * wordvec[word_a, i] # <<<<<<<<<<<<<< * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate * * gradient) */ __pyx_t_13 = __pyx_v_word_a; __pyx_t_12 = __pyx_v_i; __pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_13 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_12)) )))); /* "glove/glove_cython.pyx":97 * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) * gradient = loss * wordvec[word_a, 
i] * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate # <<<<<<<<<<<<<< * * gradient) * wordvec_sum_gradients[word_b, i] += gradient ** 2 */ __pyx_t_12 = __pyx_v_word_b; __pyx_t_13 = __pyx_v_i; /* "glove/glove_cython.pyx":98 * gradient = loss * wordvec[word_a, i] * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate * * gradient) # <<<<<<<<<<<<<< * wordvec_sum_gradients[word_b, i] += gradient ** 2 * */ __pyx_t_14 = __pyx_v_word_b; __pyx_t_15 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_14 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_15)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_12 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_13)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient)); /* "glove/glove_cython.pyx":99 * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate * * gradient) * wordvec_sum_gradients[word_b, i] += gradient ** 2 # <<<<<<<<<<<<<< * * # Update word biases. */ __pyx_t_13 = __pyx_v_word_b; __pyx_t_12 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_13 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_12)) )) += pow(__pyx_v_gradient, 2.0); } /* "glove/glove_cython.pyx":102 * * # Update word biases. * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a]) # <<<<<<<<<<<<<< * wordbias[word_a] -= learning_rate * loss * wordbias_sum_gradients[word_a] += loss ** 2 */ __pyx_t_16 = __pyx_v_word_a; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_16)) ))))); /* "glove/glove_cython.pyx":103 * # Update word biases. 
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a]) * wordbias[word_a] -= learning_rate * loss # <<<<<<<<<<<<<< * wordbias_sum_gradients[word_a] += loss ** 2 * */ __pyx_t_16 = __pyx_v_word_a; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_16)) )) -= (__pyx_v_learning_rate * __pyx_v_loss); /* "glove/glove_cython.pyx":104 * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a]) * wordbias[word_a] -= learning_rate * loss * wordbias_sum_gradients[word_a] += loss ** 2 # <<<<<<<<<<<<<< * * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) */ __pyx_t_17 = __pyx_v_word_a; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_17)) )) += pow(__pyx_v_loss, 2.0); /* "glove/glove_cython.pyx":106 * wordbias_sum_gradients[word_a] += loss ** 2 * * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) # <<<<<<<<<<<<<< * wordbias[word_b] -= learning_rate * loss * wordbias_sum_gradients[word_b] += loss ** 2 */ __pyx_t_18 = __pyx_v_word_b; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_18)) ))))); /* "glove/glove_cython.pyx":107 * * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) * wordbias[word_b] -= learning_rate * loss # <<<<<<<<<<<<<< * wordbias_sum_gradients[word_b] += loss ** 2 * */ __pyx_t_18 = __pyx_v_word_b; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_18)) )) -= (__pyx_v_learning_rate * __pyx_v_loss); /* "glove/glove_cython.pyx":108 * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) * wordbias[word_b] -= learning_rate * loss * wordbias_sum_gradients[word_b] += loss ** 2 # <<<<<<<<<<<<<< * * */ __pyx_t_19 = __pyx_v_word_b; *((double *) ( /* dim=0 */ ((char *) (((double *) 
__pyx_v_wordbias_sum_gradients.data) + __pyx_t_19)) )) += pow(__pyx_v_loss, 2.0); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "glove/glove_cython.pyx":59 * # We iterate over random indices to simulate * # shuffling the cooccurrence matrix. * with nogil: # <<<<<<<<<<<<<< * for j in prange(no_cooccurrences, num_threads=no_threads, * schedule='dynamic'): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec_sum_gradients, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordbias, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordbias_sum_gradients, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_row, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_col, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_counts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_shuffle_indices, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ /* Python wrapper */ static PyObject *__pyx_pw_5glove_12glove_cython_3transform_paragraph(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5glove_12glove_cython_2transform_paragraph[] = "\n Compute a vector representation of a paragraph. This has\n the effect of making the paragraph vector close to words\n that occur in it. 
The representation should be more\n similar to words that occur in it multiple times, and\n less close to words that are common in the corpus (have\n large word bias values).\n\n This should be be similar to a tf-idf weighting.\n "; static PyMethodDef __pyx_mdef_5glove_12glove_cython_3transform_paragraph = {"transform_paragraph", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5glove_12glove_cython_3transform_paragraph, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5glove_12glove_cython_2transform_paragraph};
/* Cython-generated Python wrapper for transform_paragraph() (glove/glove_cython.pyx:111).
   Unpacks the 11 required arguments from *args/**kwargs, converts them to C-level
   memoryview slices and scalars, and forwards to the implementation function.
   Auto-generated code: regenerate from the .pyx rather than editing by hand. */
static PyObject *__pyx_pw_5glove_12glove_cython_3transform_paragraph(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_wordvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordbias = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_paragraphvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_row = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_counts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_shuffle_indices = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_initial_learning_rate; double __pyx_v_max_count; double __pyx_v_alpha; int __pyx_v_epochs; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("transform_paragraph (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wordvec,&__pyx_n_s_wordbias,&__pyx_n_s_paragraphvec,&__pyx_n_s_sum_gradients,&__pyx_n_s_row,&__pyx_n_s_counts,&__pyx_n_s_shuffle_indices,&__pyx_n_s_initial_learning_rate,&__pyx_n_s_max_count,&__pyx_n_s_alpha,&__pyx_n_s_epochs,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); CYTHON_FALLTHROUGH; case 10: values[9] =
PyTuple_GET_ITEM(__pyx_args, 9); CYTHON_FALLTHROUGH; case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); CYTHON_FALLTHROUGH; case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; }
/* Keyword path: every one of the 11 parameters is required; a missing name
   raises TypeError via __Pyx_RaiseArgtupleInvalid. */
kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordbias)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 1); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paragraphvec)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 2); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sum_gradients)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 3); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_row)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 4); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds,
__pyx_n_s_counts)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 5); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shuffle_indices)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 6); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 7: if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_initial_learning_rate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 7); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 8: if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_count)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 8); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 9: if (likely((values[9] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 9); __PYX_ERR(0, 111, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 10: if (likely((values[10] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_epochs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 10); __PYX_ERR(0, 111, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "transform_paragraph") < 0)) __PYX_ERR(0, 111, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] =
PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); }
/* Convert Python-level values to typed C arguments (contiguous memoryview slices
   and C doubles/ints); any conversion failure jumps to __pyx_L3_error. */
__pyx_v_wordvec = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec.memview)) __PYX_ERR(0, 111, __pyx_L3_error) __pyx_v_wordbias = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordbias.memview)) __PYX_ERR(0, 112, __pyx_L3_error) __pyx_v_paragraphvec = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paragraphvec.memview)) __PYX_ERR(0, 113, __pyx_L3_error) __pyx_v_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_sum_gradients.memview)) __PYX_ERR(0, 114, __pyx_L3_error) __pyx_v_row = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_row.memview)) __PYX_ERR(0, 115, __pyx_L3_error) __pyx_v_counts = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[5], PyBUF_WRITABLE); if (unlikely(!__pyx_v_counts.memview)) __PYX_ERR(0, 116, __pyx_L3_error) __pyx_v_shuffle_indices = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[6], PyBUF_WRITABLE); if (unlikely(!__pyx_v_shuffle_indices.memview)) __PYX_ERR(0, 117, __pyx_L3_error) __pyx_v_initial_learning_rate = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_initial_learning_rate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 118, __pyx_L3_error) __pyx_v_max_count = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_max_count == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L3_error) __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 120, __pyx_L3_error) __pyx_v_epochs = __Pyx_PyInt_As_int(values[10]); if (unlikely((__pyx_v_epochs == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 121, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done;
/* Error/exit labels for the transform_paragraph wrapper: wrong argument count or
   failed conversion adds a traceback and returns NULL; success forwards to the
   implementation below. */
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 111, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("glove.glove_cython.transform_paragraph", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5glove_12glove_cython_2transform_paragraph(__pyx_self, __pyx_v_wordvec, __pyx_v_wordbias, __pyx_v_paragraphvec, __pyx_v_sum_gradients, __pyx_v_row, __pyx_v_counts, __pyx_v_shuffle_indices, __pyx_v_initial_learning_rate, __pyx_v_max_count, __pyx_v_alpha, __pyx_v_epochs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of transform_paragraph() (glove/glove_cython.pyx:111..177).
   For `epochs` passes over the shuffled cooccurrence list: for each entry it
   predicts dot(paragraphvec, wordvec[word_b]) + wordbias[word_b], weights the
   example by min(1, count/max_count)**alpha, takes loss = weight *
   (prediction - log(count)), and updates ONLY paragraphvec (word vectors and
   biases are read-only here) with a per-dimension adaptive step
   initial_learning_rate / sqrt(sum_gradients[i]) (AdaGrad-style), accumulating
   squared gradients into sum_gradients. Returns None. Cython-generated; the
   inline pointer arithmetic implements memoryview indexing -- do not hand-edit. */
static PyObject *__pyx_pf_5glove_12glove_cython_2transform_paragraph(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_paragraphvec, __Pyx_memviewslice __pyx_v_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, int __pyx_v_epochs) { int __pyx_v_dim; int __pyx_v_no_cooccurrences; int __pyx_v_word_b; double __pyx_v_count; double __pyx_v_prediction; double __pyx_v_entry_weight; double __pyx_v_loss; double __pyx_v_gradient; CYTHON_UNUSED int __pyx_v_epoch; int __pyx_v_i; int __pyx_v_j; int __pyx_v_shuffle_index; double __pyx_v_learning_rate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("transform_paragraph", 0); /* "glove/glove_cython.pyx":135 * # Get number of latent dimensions and * # number of cooccurrences.
* cdef int dim = wordvec.shape[1] # <<<<<<<<<<<<<< * cdef int no_cooccurrences = row.shape[0] * */ __pyx_v_dim = (__pyx_v_wordvec.shape[1]); /* "glove/glove_cython.pyx":136 * # number of cooccurrences. * cdef int dim = wordvec.shape[1] * cdef int no_cooccurrences = row.shape[0] # <<<<<<<<<<<<<< * * # Hold indices of current words and */ __pyx_v_no_cooccurrences = (__pyx_v_row.shape[0]); /* "glove/glove_cython.pyx":154 * # We iterate over random indices to simulate * # shuffling the cooccurrence matrix. * for epoch in range(epochs): # <<<<<<<<<<<<<< * for j in range(no_cooccurrences): * shuffle_index = shuffle_indices[j] */ __pyx_t_1 = __pyx_v_epochs; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_epoch = __pyx_t_3; /* "glove/glove_cython.pyx":155 * # shuffling the cooccurrence matrix. * for epoch in range(epochs): * for j in range(no_cooccurrences): # <<<<<<<<<<<<<< * shuffle_index = shuffle_indices[j] * */ __pyx_t_4 = __pyx_v_no_cooccurrences; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_j = __pyx_t_6; /* "glove/glove_cython.pyx":156 * for epoch in range(epochs): * for j in range(no_cooccurrences): * shuffle_index = shuffle_indices[j] # <<<<<<<<<<<<<< * * word_b = row[shuffle_index] */ __pyx_t_7 = __pyx_v_j; __pyx_v_shuffle_index = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_shuffle_indices.data) + __pyx_t_7)) ))); /* "glove/glove_cython.pyx":158 * shuffle_index = shuffle_indices[j] * * word_b = row[shuffle_index] # <<<<<<<<<<<<<< * count = counts[shuffle_index] * */ __pyx_t_7 = __pyx_v_shuffle_index; __pyx_v_word_b = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_row.data) + __pyx_t_7)) ))); /* "glove/glove_cython.pyx":159 * * word_b = row[shuffle_index] * count = counts[shuffle_index] # <<<<<<<<<<<<<< * * # Get prediction */ __pyx_t_7 = __pyx_v_shuffle_index; __pyx_v_count = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_counts.data) + __pyx_t_7))
))); /* "glove/glove_cython.pyx":162 * * # Get prediction * prediction = 0.0 # <<<<<<<<<<<<<< * for i in range(dim): * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] */ __pyx_v_prediction = 0.0; /* "glove/glove_cython.pyx":163 * # Get prediction * prediction = 0.0 * for i in range(dim): # <<<<<<<<<<<<<< * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] * prediction += wordbias[word_b] */ __pyx_t_8 = __pyx_v_dim; __pyx_t_9 = __pyx_t_8; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_i = __pyx_t_10; /* "glove/glove_cython.pyx":164 * prediction = 0.0 * for i in range(dim): * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] # <<<<<<<<<<<<<< * prediction += wordbias[word_b] * */ __pyx_t_7 = __pyx_v_i; __pyx_t_11 = __pyx_v_word_b; __pyx_t_12 = __pyx_v_i; __pyx_v_prediction = (__pyx_v_prediction + ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_paragraphvec.data) + __pyx_t_7)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_11 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_12)) ))))); } /* "glove/glove_cython.pyx":165 * for i in range(dim): * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] * prediction += wordbias[word_b] # <<<<<<<<<<<<<< * * # Compute loss and the example weight. */ __pyx_t_12 = __pyx_v_word_b; __pyx_v_prediction = (__pyx_v_prediction + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_12)) )))); /* "glove/glove_cython.pyx":168 * * # Compute loss and the example weight. * entry_weight = double_min(1.0, (count / max_count)) ** alpha # <<<<<<<<<<<<<< * loss = entry_weight * (prediction - c_log(count)) * */ __pyx_v_entry_weight = pow(__pyx_f_5glove_12glove_cython_double_min(1.0, (__pyx_v_count / __pyx_v_max_count)), __pyx_v_alpha); /* "glove/glove_cython.pyx":169 * # Compute loss and the example weight.
* entry_weight = double_min(1.0, (count / max_count)) ** alpha * loss = entry_weight * (prediction - c_log(count)) # <<<<<<<<<<<<<< * * # Update step: apply gradients. */ __pyx_v_loss = (__pyx_v_entry_weight * (__pyx_v_prediction - log(__pyx_v_count))); /* "glove/glove_cython.pyx":172 * * # Update step: apply gradients. * for i in range(dim): # <<<<<<<<<<<<<< * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) * gradient = loss * wordvec[word_b, i] */ __pyx_t_8 = __pyx_v_dim; __pyx_t_9 = __pyx_t_8; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_i = __pyx_t_10; /* "glove/glove_cython.pyx":173 * # Update step: apply gradients. * for i in range(dim): * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) # <<<<<<<<<<<<<< * gradient = loss * wordvec[word_b, i] * paragraphvec[i] = (paragraphvec[i] - learning_rate */ __pyx_t_12 = __pyx_v_i; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_sum_gradients.data) + __pyx_t_12)) ))))); /* "glove/glove_cython.pyx":174 * for i in range(dim): * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) * gradient = loss * wordvec[word_b, i] # <<<<<<<<<<<<<< * paragraphvec[i] = (paragraphvec[i] - learning_rate * * gradient) */ __pyx_t_12 = __pyx_v_word_b; __pyx_t_11 = __pyx_v_i; __pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_12 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_11)) )))); /* "glove/glove_cython.pyx":175 * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) * gradient = loss * wordvec[word_b, i] * paragraphvec[i] = (paragraphvec[i] - learning_rate # <<<<<<<<<<<<<< * * gradient) * sum_gradients[i] += gradient ** 2 */ __pyx_t_11 = __pyx_v_i; /* "glove/glove_cython.pyx":176 * gradient = loss * wordvec[word_b, i] * paragraphvec[i] = (paragraphvec[i] - learning_rate * * gradient) # <<<<<<<<<<<<<< *
sum_gradients[i] += gradient ** 2 */ __pyx_t_12 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_paragraphvec.data) + __pyx_t_12)) )) = ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_paragraphvec.data) + __pyx_t_11)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient)); /* "glove/glove_cython.pyx":177 * paragraphvec[i] = (paragraphvec[i] - learning_rate * * gradient) * sum_gradients[i] += gradient ** 2 # <<<<<<<<<<<<<< */ __pyx_t_11 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_sum_gradients.data) + __pyx_t_11)) )) += pow(__pyx_v_gradient, 2.0); } } } /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordbias, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_paragraphvec, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_sum_gradients, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_row, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_counts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_shuffle_indices, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Below: start of Cython's internal View.MemoryView support code (the `array`
   extension type's __cinit__ wrapper), bundled into every module that uses
   typed memoryviews. */
/* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject
**__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
/* Argument parsing for View.MemoryView array.__cinit__: shape, itemsize and
   format are required; mode defaults to "c" (values[3] preset below) and
   allocate_buffer defaults to True. */
PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] =
PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } }
/* Convert parsed Python objects to C-level locals; failures jump to __pyx_L3_error. */
__pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, #
<<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = 
__Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if 
(CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, 
__pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise 
ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); 
if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED 
int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = 
PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ 
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * 
info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { 
__Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * 
if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return 
self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations 
PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* array.__getattr__ implementation: delegates any unknown attribute lookup to self.memview (equivalent to `return getattr(self.memview, attr)`). Returns a new reference, or NULL with an exception set. NOTE(review): Cython-generated code — fix the .pyx source, not this file. */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); /* fetch the lazily-created memoryview via the `memview` property, then do a plain getattr on it */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ /* array.__getitem__ slot wrapper: unpacks the PyObject* args and forwards to the _10__getitem__ implementation below. */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int 
__pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static 
PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject 
*__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * 
@cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, 
__pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if 
(PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int 
/* NOTE(review): Machine-generated C (Cython "View.MemoryView" utility code) —
 * do not hand-edit; regenerate from the .pyx source.
 *
 * This span contains the Enum helper class used for memoryview access-mode
 * sentinels: Enum.__init__ (keyword-aware wrapper parsing the single `name`
 * argument, plus the impl that stores it), Enum.__repr__ (returns self.name),
 * and Enum.__reduce_cython__, which builds a (name, [__dict__]) state tuple
 * and returns a pickle recipe via __pyx_unpickle_Enum with checksum
 * 0xb068931 (== 184977713, matching __pyx_int_184977713 below).  The span
 * ends inside the declaration of the __setstate_cython__ impl, which
 * continues on the next span. */
__pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* 
"View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef 
object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', 
None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); 
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject 
*)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void 
*align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == 
(int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* review: Cython-generated implementation of memoryview.__cinit__ (source line 345 of
 * View.MemoryView). Do not hand-edit logic here — the generator owns this file.
 * Stores obj and flags on self, acquires the buffer via __Pyx_GetBuffer when obj is
 * not None (or when self is exactly the memoryview type), grabs a lock from the
 * preallocated pool (allocating one if the pool is exhausted), and derives
 * dtype_is_object from the buffer format ('O' + NUL) when PyBUF_FORMAT was requested. */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) {
/* review: some exporters leave Py_buffer.obj NULL; an owned Py_None is substituted so
 * __dealloc__ can distinguish "no buffer to release" from a real exporter reference. */
/* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */
/* review: the literal 8 below is THREAD_LOCKS_PREALLOCATED, constant-folded by Cython. */
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */
/* review: Python-level __dealloc__ wrapper — thin trampoline into the _2__dealloc__ impl. */
/* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void
/* review: memoryview.__dealloc__ implementation (Cython-generated). Releases the buffer
 * when self.obj is a real exporter, or drops the owned Py_None placeholder installed by
 * __cinit__; then returns self.lock to the preallocated pool by swapping it with the
 * last in-use slot, and frees it with PyThread_free_lock only if it was not pooled. */
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */
/* review: get_item_pointer — iterates over the index object (list/tuple fast path, generic
 * iterator otherwise) and folds each integer index into an element pointer through
 * __pyx_pybuffer_index; returns NULL on error ("except NULL" contract). */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else
{ __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */
/* review: mp_subscript slot wrapper for memoryview.__getitem__ — trampoline to the impl. */
/* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* review: memoryview.__getitem__ implementation — Ellipsis returns self; otherwise
 * _unellipsify normalizes the index, and the result is either a sliced memoryview
 * (memview_slice) or a scalar converted via convert_item_to_object. */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */
/* review: mp_ass_subscript slot wrapper for memoryview.__setitem__ — trampoline to the impl. */
/* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): *
if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ }
/* review: past the readonly guard, _unellipsify rebinds `index`; slice targets dispatch to
 * setitem_slice_assignment (memoryview source) or setitem_slice_assign_scalar, while a
 * fully-indexed element goes through setitem_indexed. */
/* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | 
PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ 
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); 
__Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = 
__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = 
((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * 
else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ 
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); 
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); 
__pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ 
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ 
__Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if 
(!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * 
for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, 
int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & 
PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * 
if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * 
info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit 
code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); 
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ 
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t 
*__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); 
__pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* 
"View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* 
"View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ /* NOTE(review): Cython-generated — do not hand-edit. Span covers: body/exit of memoryview.__repr__ (formats "<MemoryView of %r at 0x%x>" from self.base.__class__.__name__ and id(self)), and memoryview.__str__ (wrapper + impl, formats "<MemoryView of %r object>"). Refcount bookkeeping (__Pyx_GOTREF/DECREF) and the __pyx_L1_error goto ladder are order-sensitive; regenerate from the .pyx for any change. */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); 
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = 
/* NOTE(review): Cython-generated — do not hand-edit. Span completes memoryview.is_c_contig
 * (wraps __pyx_memviewslice_is_contig(slice, 'C', ndim) in a Python bool) and contains all of
 * memoryview.is_f_contig (same check with order 'F'), plus the opening of memoryview.copy(). */
__Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * 
return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
/* NOTE(review): Cython-generated — do not hand-edit. Span holds the impl of memoryview.copy()
 * (clears PyBUF_F_CONTIGUOUS, makes a C-contiguous copy via __pyx_memoryview_copy_new_contig
 * with "c"/PyBUF_C_CONTIGUOUS) and all of memoryview.copy_fortran() (mirror image: clears
 * PyBUF_C_CONTIGUOUS, copies with "fortran"/PyBUF_F_CONTIGUOUS), plus the opening of
 * __reduce_cython__. Error paths use PyErr_Occurred() because the slice helpers return by value. */
*__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* 
function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, 
(&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct 
/* NOTE(review): Cython-generated — do not hand-edit. Span completes __reduce_cython__ and holds
 * all of __setstate_cython__: both unconditionally raise TypeError("no default __reduce__ due to
 * non-trivial __cinit__"), i.e. pickling of memoryview objects is deliberately disabled. */
__pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 
0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo 
*/ /* NOTE(review): Cython-generated — do not hand-edit. Span holds __pyx_memoryview_new
(memoryview_cwrapper: builds a (o, flags, dtype_is_object) tuple, calls the memoryview type,
stamps result->typeinfo, returns a new reference) and __pyx_memoryview_check (inline
isinstance check via __Pyx_TypeCheck), plus the opening of _unellipsify. */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* 
"View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * 
Replace all ellipses with full slices and fill incomplete indices with */ /* NOTE(review):
Cython-generated — do not hand-edit. _unellipsify normalizes an indexing expression: wraps a
non-tuple index into a 1-tuple, expands the first Ellipsis into (ndim - len(tup) + 1) full
slices (later Ellipses become a single slice(None)), rejects items that are neither slices nor
integer indices with TypeError("Cannot index with type '%s'"), pads the result with trailing
full slices up to ndim, and returns (have_slices or nslices, tuple(result)). The function body
continues past the end of this chunk — the remainder (incl. assert_direct_dimensions) is
outside this view. */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if 
(unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == 
((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ 
/*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) 
__PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ 
/* NOTE(review): Cython-generated output for the built-in "View.MemoryView"
 * module — presumably regenerated from the .pyx source on each build, so
 * only comments are added here; every code token is exactly as emitted. */

/* function exit code — shared epilogue of View.MemoryView._unellipsify
 * (whose body begins above this span): the error label decrefs live
 * temporaries and records a traceback, then both paths release the
 * function's locals and return the (possibly NULL) result. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_11);
  __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_tup);
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_idx);
  __Pyx_XDECREF(__pyx_v_item);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":700
 *     return have_slices or nslices, tuple(result)
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):             # <<<<<<<<<<<<<<
 *     for suboffset in suboffsets[:ndim]:
 *         if suboffset >= 0:
 */

/* assert_direct_dimensions: scan the first `ndim` entries of `suboffsets`
 * and raise ValueError("Indirect dimensions not supported") if any entry
 * is >= 0 (a non-negative suboffset marks an indirect dimension in the
 * PEP 3118 buffer layout).  Returns Py_None on success; returns NULL with
 * a Python exception set on failure. */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
  Py_ssize_t __pyx_v_suboffset;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  Py_ssize_t *__pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("assert_direct_dimensions", 0);

  /* "View.MemoryView":701
   *
   * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
   *     for suboffset in suboffsets[:ndim]:             # <<<<<<<<<<<<<<
   *         if suboffset >= 0:
   *             raise ValueError("Indirect dimensions not supported")
   */
  /* pointer-range loop over suboffsets[0 .. ndim-1] */
  __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
  for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
    __pyx_t_1 = __pyx_t_3;
    __pyx_v_suboffset = (__pyx_t_1[0]);

    /* "View.MemoryView":702
     * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
     *     for suboffset in suboffsets[:ndim]:
     *         if suboffset >= 0:             # <<<<<<<<<<<<<<
     *             raise ValueError("Indirect dimensions not supported")
     *
     */
    __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
    if (unlikely(__pyx_t_4)) {

      /* "View.MemoryView":703
       *     for suboffset in suboffsets[:ndim]:
       *         if suboffset >= 0:
       *             raise ValueError("Indirect dimensions not supported")             # <<<<<<<<<<<<<<
       *
       *
       */
      /* __pyx_tuple__16 is presumably the pre-built args tuple for this
       * ValueError message — defined elsewhere in the generated module. */
      __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_Raise(__pyx_t_5, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
      __PYX_ERR(1, 703, __pyx_L1_error)

      /* "View.MemoryView":702
       * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
       *     for suboffset in suboffsets[:ndim]:
       *         if suboffset >= 0:             # <<<<<<<<<<<<<<
       *             raise ValueError("Indirect dimensions not supported")
       *
       */
    }
  }

  /* "View.MemoryView":700
   *     return have_slices or nslices, tuple(result)
   *
   * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):             # <<<<<<<<<<<<<<
   *     for suboffset in suboffsets[:ndim]:
   *         if suboffset >= 0:
   */

  /* function exit code: no indirect dimension found — return None */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":710
 *
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices):             # <<<<<<<<<<<<<<
 *     cdef int new_ndim = 0, suboffset_dim = -1, dim
 *     cdef bint negative_step
 */

/* __pyx_memview_slice — only the signature and local declarations fall in
 * this span; the function body continues on the following original lines. */
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
  int __pyx_v_new_ndim;
  int __pyx_v_suboffset_dim;
  int __pyx_v_dim;
  __Pyx_memviewslice __pyx_v_src;
  __Pyx_memviewslice __pyx_v_dst;
  __Pyx_memviewslice *__pyx_v_p_src;
  struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
  __Pyx_memviewslice *__pyx_v_p_dst;
  int *__pyx_v_p_suboffset_dim;
  Py_ssize_t __pyx_v_start;
  Py_ssize_t __pyx_v_stop;
  Py_ssize_t __pyx_v_step;
  int __pyx_v_have_start;
  int __pyx_v_have_stop;
  int __pyx_v_have_step;
  PyObject *__pyx_v_index = NULL;
  struct __pyx_memoryview_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = /* initializer continues on the next original line */
NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct 
__pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; 
__Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], 
p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ 
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; 
__pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * 
memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * 
else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # 
<<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if 
negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if 
(__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not 
have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step 
* dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + 
__pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef 
WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index 
< 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride 
*/ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char 
*pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], 
strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if 
(unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef 
convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { 
__Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if 
(unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * 
/* NOTE(review): This file is Cython-generated C (View.MemoryView module). Do not hand-edit;
 * change the originating .pyx/.pxd and regenerate. Comments below are review annotations only. */
/* This span: tail of _memoryviewslice.__reduce_cython__ (begins before this chunk), then the
 * Python wrapper and implementation of _memoryviewslice.__setstate_cython__ — both pickle hooks
 * unconditionally raise TypeError("no default __reduce__ due to non-trivial __cinit__") —
 * followed by the declarations head of __pyx_memoryview_fromslice (continues below). */
def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno 
/* NOTE(review): Cython-generated — do not hand-edit; regenerate from the .pyx source.
 * This span is the body and exit code of __pyx_memoryview_fromslice: it wraps a C-level
 * __Pyx_memviewslice into a new Python _memoryviewslice object. Visible behavior:
 * returns None when memviewslice.memview is Py_None; otherwise constructs
 * _memoryviewslice(None, 0, dtype_is_object), INCREFs the slice, copies the Py_buffer
 * view from the source memview, points shape/strides (and suboffsets, only if any
 * suboffset >= 0) at the embedded from_slice arrays, picks PyBUF_RECORDS vs
 * PyBUF_RECORDS_RO from the source's PyBUF_WRITABLE flag, and recomputes
 * view.len = itemsize * product(shape[:ndim]). Ends with the start of
 * get_slice_from_memview (continues in the next span). */
= 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * 
__PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = 
ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = 
NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = 
result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 
/* NOTE(review): Cython-generated — do not hand-edit. This span: (1) finishes
 * get_slice_from_memview (returns &obj.from_slice for _memoryviewslice, else copies into
 * *mslice and returns it); (2) all of slice_copy, which fills dst's memview/data pointers
 * and per-dimension shape/strides/suboffsets from memview.view — suboffsets become -1 for
 * every dim when view.suboffsets is NULL; (3) the head of memoryview_copy, which snapshots
 * the memoryview into a stack slice (continues in next span). */
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; 
__Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * 
dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
/* NOTE(review): Cython-generated — do not hand-edit. This span: (1) finishes
 * memoryview_copy (delegates to copy_object_from_slice); (2) all of
 * memoryview_copy_from_slice, which forwards to memoryview_fromslice, propagating the
 * to_object_func/to_dtype_func conversion pointers only when the source is already a
 * _memoryviewslice (NULL otherwise); (3) all of abs_py_ssize_t (plain |arg|, nogil,
 * no error path); (4) the declarations head of get_best_slice_order (continues below). */
__pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, 
_memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
/* NOTE(review): Cython-generated — do not hand-edit. This span is the body and end of
 * get_best_slice_order: c_stride = stride of the last dimension with extent > 1 (scanning
 * ndim-1 down to 0), f_stride = stride of the first such dimension; returns 'C' when
 * |c_stride| <= |f_stride|, else 'F' (both strides start at 0, so an all-size-1 slice
 * yields 'C'). Ends with the head of _copy_strided_to_strided (continues in next span). */
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * 
break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # 
<<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for 
i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, 
 (tail of quoted .pyx source: dst_shape + 1, ndim - 1, itemsize) */
        /* ndim > 1 recursive branch: after recursing into the sub-slice,
         * advance both base pointers by their dimension-0 strides. */
        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
      }
    }
  __pyx_L3:;
  /* function exit code */
}

/* copy_strided_to_strided  (Cython: "cdef void copy_strided_to_strided(
 *     __Pyx_memviewslice *src, __Pyx_memviewslice *dst,
 *     int ndim, size_t itemsize) nogil")
 * Thin entry point: unpacks the two memoryview slices and delegates to the
 * recursive per-dimension worker _copy_strided_to_strided with each slice's
 * data pointer, strides array and shape array. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
  /* function exit code */
}

/* __pyx_memoryview_slice_get_size  (Cython: slice_get_size, @cname'd)
 * "Return the size of the memory occupied by the slice in number of bytes":
 * starts from src.memview.view.itemsize and multiplies in the first `ndim`
 * extents of src.shape. No overflow checking is performed here. */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  Py_ssize_t __pyx_v_shape;
  Py_ssize_t __pyx_v_size;
  Py_ssize_t __pyx_r;
  Py_ssize_t __pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  /* size = src.memview.view.itemsize */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_size = __pyx_t_1;
  /* for shape in src.shape[:ndim]: size *= shape */
  __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
  for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
    __pyx_t_2 = __pyx_t_4;
    __pyx_v_shape = (__pyx_t_2[0]);
    __pyx_v_size = (__pyx_v_size * __pyx_v_shape);
  }
  __pyx_r = __pyx_v_size;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* __pyx_fill_contig_strides_array  (Cython: fill_contig_strides_array)
 * Signature continues on the next original line: fills `strides` for a
 * contiguous layout of `shape` and returns the accumulated stride. */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int
__pyx_v_ndim, char __pyx_v_order) {
  /* (continuation of __pyx_fill_contig_strides_array, whose "static
   * Py_ssize_t __pyx_fill_contig_strides_array(... Py_ssize_t stride, int"
   * head is on the previous original line.)
   * Computes contiguous strides in place: order 'F' walks dimensions
   * ascending (first dim varies fastest), anything else walks descending
   * (C order, last dim varies fastest). Returns the final accumulated
   * stride, i.e. the initial `stride` times the product of all extents. */
  int __pyx_v_idx;
  Py_ssize_t __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  /* if order == 'F': */
  __pyx_t_1 = ((__pyx_v_order == 'F') != 0);
  if (__pyx_t_1) {
    __pyx_t_2 = __pyx_v_ndim;
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_idx = __pyx_t_4;
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
    goto __pyx_L3;
  }
  /*else*/ {
    /* C order: iterate idx from ndim-1 down to 0 */
    for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
      __pyx_v_idx = __pyx_t_2;
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
  }
  __pyx_L3:;
  __pyx_r = __pyx_v_stride;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* __pyx_memoryview_copy_data_to_temp  (Cython: copy_data_to_temp)
 * Allocates a malloc'd buffer big enough for `src` (slice_get_size bytes),
 * initializes `tmpslice` to describe it (same shape, suboffsets = -1,
 * contiguous strides in `order`, zero stride for extent-1 dims), then copies
 * src into it — memcpy when src is already contiguous in `order`, otherwise
 * element-wise via copy_strided_to_strided. Returns the buffer (caller
 * frees), or NULL after raising MemoryError through _err. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* itemsize = src.memview.view.itemsize */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  /* size = slice_get_size(src, ndim) */
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
  /* result = malloc(size); if not result: _err(MemoryError, NULL) */
  __pyx_v_result = malloc(__pyx_v_size);
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {
    /* call continues on the next original line */
    __pyx_t_3 =
__pyx_memoryview_err(__pyx_builtin_MemoryError, NULL);
    /* (continuation of copy_data_to_temp: raise MemoryError when malloc
     * returned NULL; _err always returns -1 after raising.) */
    if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
  }
  /* tmpslice describes the new buffer: same shape as src, no indirect
   * (PIL-style) dimensions, contiguous strides in `order`. */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
  __pyx_t_4 = __pyx_v_src->memview;
  __pyx_v_tmpslice->memview = __pyx_t_4;
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
  }
  (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
  /* Zero the stride of any extent-1 dimension (broadcasting convention). */
  __pyx_t_3
= __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;
    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
    if (__pyx_t_2) {
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
    }
  }
  /* Fast path: one memcpy when src is already contiguous in `order`;
   * otherwise copy element-wise. */
  __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
  if (__pyx_t_2) {
    (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
    goto __pyx_L9;
  }
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;
  /* function exit code: on error, record the traceback (taking the GIL,
   * since this function runs nogil) and return NULL. */
  __pyx_L1_error:;
  {
#ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}

/* __pyx_memoryview_err_extents  (Cython: _err_extents, "except -1 with gil")
 * Raises ValueError("got differing extents in dimension %d (got %d and %d)"
 * % (i, extent1, extent2)). Always returns -1; acquires the GIL on entry. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
#ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
  __Pyx_RefNannySetupContext("_err_extents", 0);
  /* Build the (i, extent1, extent2) tuple for the format string. */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  /* (continuation of _err_extents: tuple now owns the three ints) */
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;
  /* raise ValueError(fmt % (i, extent1, extent2)) */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_4, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __PYX_ERR(1, 1253, __pyx_L1_error)
  /* function exit code: always the error path; release temporaries,
   * record traceback, return -1, drop the GIL taken on entry. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  return __pyx_r;
}

/* __pyx_memoryview_err_dim  (Cython: _err_dim, "except -1 with gil")
 * Raises error(msg.decode('ascii') % dim) — `msg` is an ASCII C format
 * string with one %d. Always returns -1; acquires the GIL on entry. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
#ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_error);
  /* msg.decode('ascii') % dim */
  __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* Call error(formatted_msg); unpack bound methods when possible. */
  __Pyx_INCREF(__pyx_v_error);
  __pyx_t_3 = __pyx_v_error;
  __pyx_t_2 = NULL;
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_2)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  /* expression continues on the next original line */
  __pyx_t_1 = (__pyx_t_2) ?
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
  /* (continuation of _err_dim: instantiate the exception and raise it) */
  __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(1, 1258, __pyx_L1_error)
  /* function exit code: always the error path; clean up, record traceback,
   * return -1, drop the GIL taken on entry. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  return __pyx_r;
}

/* __pyx_memoryview_err  (Cython: _err, "except -1 with gil")
 * raise error(msg.decode('ascii')) when msg is non-NULL, else re-raise
 * `error` bare. Always returns -1; acquires the GIL on entry. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
#ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
  __Pyx_RefNannySetupContext("_err", 0);
  __Pyx_INCREF(__pyx_v_error);
  /* if msg != NULL: raise error(msg.decode('ascii')) */
  __pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
  if (unlikely(__pyx_t_1)) {
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    /* Call error(decoded_msg); unpack bound methods when possible. */
    __Pyx_INCREF(__pyx_v_error);
    __pyx_t_4 = __pyx_v_error;
    __pyx_t_5 = NULL;
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_4, function);
      }
    }
    __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(1, 1263, __pyx_L1_error)
  }
  /* else: raise error (bare re-raise of the given exception object) */
  /*else*/ {
    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
    __PYX_ERR(1, 1265, __pyx_L1_error)
  }
  /* function exit code: always the error path; the AddTraceback call's
   * argument list continues on the next original line. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView._err",
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * 
elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ 
__pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = 
(__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); 
if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * 
memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = 
__pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD 
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  }
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* Cython-generated from "View.MemoryView":1340 (cdef void broadcast_leading).
 * Broadcasts *mslice up to `ndim_other` dimensions: the existing `ndim`
 * dimensions are shifted right by `offset = ndim_other - ndim` (copied
 * back-to-front so the in-place move never overwrites a slot before it is
 * read), then each new leading dimension gets extent 1, the stride of
 * dimension 0, and suboffset -1 (no indirection). No Python API calls. */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
  int __pyx_v_i;
  int __pyx_v_offset;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;

  /* "View.MemoryView":1344 -- offset = ndim_other - ndim */
  __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);

  /* "View.MemoryView":1346 -- shift shape/strides/suboffsets right by
   * `offset`, iterating from the last dimension downwards. */
  for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
    __pyx_v_i = __pyx_t_1;
    (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
    (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
    (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
  }

  /* "View.MemoryView":1351 -- fill the new leading broadcast dimensions. */
  __pyx_t_1 = __pyx_v_offset;
  __pyx_t_2 = __pyx_t_1;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;
    (__pyx_v_mslice->shape[__pyx_v_i]) = 1;
    (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
    (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
  }
}

/* Cython-generated from "View.MemoryView":1362 (cdef void refcount_copying).
 * Before/after bulk-copying into `dst`: when the element type is a Python
 * object, INCREF (`inc` true) or DECREF (`inc` false) every object in the
 * slice via the GIL-acquiring helper. No-op for non-object dtypes, so it is
 * callable from nogil code. */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
  int __pyx_t_1;

  /* "View.MemoryView":1366 -- only object dtypes carry refcounts. */
  __pyx_t_1 = (__pyx_v_dtype_is_object != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":1367 */
    __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape,
__pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
  }
}

/* Cython-generated from "View.MemoryView":1371
 * (cdef void refcount_objects_in_slice_with_gil(...) "with gil").
 * GIL-acquiring wrapper: the slice copy/assign routines run without the GIL,
 * so take it via PyGILState_Ensure before touching Python refcounts, then
 * delegate the INCREF/DECREF walk to refcount_objects_in_slice. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
  __Pyx_RefNannyDeclarations
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);

  /* "View.MemoryView":1374 -- perform the actual refcount adjustment. */
  __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  #endif
}

/* "View.MemoryView":1377 -- cdef void refcount_objects_in_slice(...) follows. */
/* Cython-generated from "View.MemoryView":1377 (cdef void refcount_objects_in_slice).
 * Recursively walks an `ndim`-dimensional slice whose elements are PyObject*.
 * For each index along dimension 0: when ndim == 1 the element is INCREF'd
 * (`inc` true) or DECREF'd (`inc` false); otherwise recurse with
 * shape+1/strides+1 and ndim-1. `data` advances by strides[0] per element.
 * Must hold the GIL (see the _with_gil wrapper above). */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  int __pyx_t_4;
  __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);

  /* "View.MemoryView":1381 -- for i in range(shape[0]) */
  __pyx_t_1 = (__pyx_v_shape[0]);
  __pyx_t_2 = __pyx_t_1;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;

    /* "View.MemoryView":1382 -- innermost dimension: adjust the refcount. */
    __pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
    if (__pyx_t_4) {
      __pyx_t_4 = (__pyx_v_inc != 0);
      if (__pyx_t_4) {
        Py_INCREF((((PyObject **)__pyx_v_data)[0]));
        goto __pyx_L6;
      }
      /*else*/ {
        Py_DECREF((((PyObject **)__pyx_v_data)[0]));
      }
      __pyx_L6:;
      goto __pyx_L5;
    }

    /* "View.MemoryView":1388 -- outer dimension: recurse one level down. */
    /*else*/ {
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
    }
    __pyx_L5:;

    /* "View.MemoryView":1391 -- step to the next element along dim 0. */
    __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
  }

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Cython-generated from "View.MemoryView":1397 (cdef void slice_assign_scalar).
 * Assigns one scalar `item` (itemsize bytes) to every element of `dst`:
 * release refs held by the old contents, memcpy-fill every element, then
 * re-incref the (now duplicated) objects when dtype_is_object. */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {

  /* "View.MemoryView":1400 -- drop refs on the contents to be overwritten. */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);

  /* "View.MemoryView":1401 -- overwrite every element with *item. */
  __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);

  /* "View.MemoryView":1403 --
   refcount_copying(dst, dtype_is_object, ndim, True) */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);

  /* function exit code */
}

/* Cython-generated from "View.MemoryView":1407 (cdef void _slice_assign_scalar).
 * Recursive worker for slice_assign_scalar: when ndim == 1, memcpy `itemsize`
 * bytes from *item into each of shape[0] elements, stepping strides[0] bytes
 * per element; otherwise recurse into each sub-slice with shape+1/strides+1.
 * No Python API calls, so it is safe without the GIL. */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  Py_ssize_t __pyx_v_stride;
  Py_ssize_t __pyx_v_extent;
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;

  /* "View.MemoryView":1411-1412 -- stride/extent of the leading dimension. */
  __pyx_v_stride = (__pyx_v_strides[0]);
  __pyx_v_extent = (__pyx_v_shape[0]);

  /* "View.MemoryView":1414 -- base case: 1-D, copy item into each element. */
  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_1) {
    __pyx_t_2 = __pyx_v_extent;
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_i = __pyx_t_4;
      (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
    }
    goto __pyx_L3;
  }

  /* "View.MemoryView":1419 -- recurse over the leading dimension. */
  /*else*/ {
    __pyx_t_2 = __pyx_v_extent;
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_i = __pyx_t_4;
      __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
    }
  }
  __pyx_L3:;
}

/* Cython-generated pickling helper for "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state).
 * Prototype and method-table entry follow. */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef
/* Method-table entry for the module-level __pyx_unpickle_Enum helper. */
__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};

/* CPython-level wrapper: unpacks the three arguments (__pyx_type,
 * __pyx_checksum, __pyx_state), accepted positionally or by keyword,
 * converts the checksum to a C long, and forwards to the implementation
 * __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum. Arity/keyword errors
 * raise via __Pyx_RaiseArgtupleInvalid and return NULL. */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
    PyObject* values[3] = {0,0,0};
    /* Keyword path: collect the positionals, then fill the rest from kwds. */
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case 0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case 1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case 2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly three positional arguments. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
    __pyx_v___pyx_state = values[2];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __pyx_unpickle_Enum: verify the pickle checksum
 * (0xb068931), construct the object via Enum.__new__(__pyx_type), optionally
 * apply __pyx_state, and return the new instance (NULL + exception on error). */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_PickleError = 0;
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);

  /* "(tree fragment)":4 -- if __pyx_checksum != 0xb068931: */
  __pyx_t_1 =
((__pyx_v___pyx_checksum != 0xb068931) != 0);
  if (__pyx_t_1) {

    /* "(tree fragment)":5 -- from pickle import PickleError as __pyx_PickleError */
    __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_n_s_PickleError);
    __Pyx_GIVEREF(__pyx_n_s_PickleError);
    PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
    __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_2);
    __pyx_v___pyx_PickleError = __pyx_t_2;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

    /* "(tree fragment)":6 -- raise PickleError("Incompatible checksums ..." % checksum) */
    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_INCREF(__pyx_v___pyx_PickleError);
    __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
    /* Unpack a bound method into (self, function) to call the exception type. */
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_2, function);
      }
    }
    __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 6, __pyx_L1_error)
  }

  /* "(tree fragment)":7 -- __pyx_result = Enum.__new__(__pyx_type) */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = NULL;
  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
    if (likely(__pyx_t_4)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_4);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_2, function);
    }
  }
  __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_v___pyx_result = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "(tree fragment)":8 -- if __pyx_state is not None: apply it (must be a tuple). */
  __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
  __pyx_t_6 = (__pyx_t_1 != 0);
  if (__pyx_t_6) {
    if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
    __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  }

  /* "(tree fragment)":10 -- return __pyx_result */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_PickleError);
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Cython-generated from "(tree fragment)":11
 * (cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state)).
 * Restores pickled state: __pyx_result.name = __pyx_state[0]; then, if the
 * state tuple has more than one item and the object has a __dict__, updates
 * that __dict__ from __pyx_state[1]. Returns None, or NULL on error. */
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);

  /* "(tree fragment)":12 -- __pyx_result.name = __pyx_state[0] */
  if (unlikely(__pyx_v___pyx_state == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(1, 12, __pyx_L1_error)
  }
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->name);
  __Pyx_DECREF(__pyx_v___pyx_result->name);
  __pyx_v___pyx_result->name = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13 -- if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
   * (short-circuit: the hasattr check only runs when the tuple is long enough) */
  if (unlikely(__pyx_v___pyx_state == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(1, 13, __pyx_L1_error)
  }
  __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
  __pyx_t_4 = ((__pyx_t_3 > 1) != 0);
  if (__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 0);
  __pyx_t_2 = __pyx_t_5;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":14 -- __pyx_result.__dict__.update(__pyx_state[1]) */
    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
      __PYX_ERR(1, 14, __pyx_L1_error)
    }
    __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_8 = NULL;
    /* Unpack a bound method into (self, function) before the call. */
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
      __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
      if (likely(__pyx_t_8)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
        __Pyx_INCREF(__pyx_t_8);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_7, function);
      }
    }
    __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }

  /* function exit code -- returns None on success, 0 (NULL) on error. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Vtable instance for the generated `array` extension type follows. */
static struct
/* NOTE(review): Cython-generated type-slot glue for the `array` extension type
 * (module "glove.glove_cython"). Do not hand-edit beyond comments — regenerate
 * from the .pyx source. */
__pyx_vtabstruct_array __pyx_vtable_array; /* completes the `static struct` declaration begun on the previous line */
/* tp_new slot: allocate an `array` instance, wire up its vtable, and
 * default the object-typed fields to Py_None before running __cinit__. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_array_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    /* abstract subtype path: fall back to object.__new__ */
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_array_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_array;
  p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
  p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
  /* __cinit__ gets the original args/kwargs; on failure the half-built
   * instance is released and NULL propagates the pending exception. */
  if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
  return o;
  bad:
  Py_DECREF(o); o = 0; return NULL;
}
/* tp_dealloc slot: run the optional finalizer, call __dealloc__ with the
 * object temporarily resurrected, then drop owned references and free. */
static void __pyx_tp_dealloc_array(PyObject *o) {
  struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
    if (PyObject_CallFinalizerFromDealloc(o)) return; /* finalizer resurrected the object */
  }
  #endif
  {
    /* Preserve any in-flight exception across __dealloc__; the refcount
     * bump keeps the object alive while Python-visible code runs. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_array___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->mode);
  Py_CLEAR(p->_format);
  (*Py_TYPE(o)->tp_free)(o);
}
/* sq_item slot: implement a[i] for integer i by boxing the index and
 * delegating to the mapping-protocol subscript. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyInt_FromSsize_t(i);
  if(!x) return 0;
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  Py_DECREF(x);
  return r;
}
/* mp_ass_subscript slot: a[i] = v delegates to __setitem__; `del a[i]`
 * (v == NULL) is explicitly unsupported. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_array___setitem__(o, i, v);
  }
  else {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
}
/* tp_getattro slot: generic attribute lookup first; on AttributeError only,
 * clear the error and fall back to the type's __getattr__. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
  PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
  if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
    PyErr_Clear();
    v = __pyx_array___getattr__(o, n);
  }
  return v;
}
/* `memview` property getter — signature continues on the next source line. */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject
*o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "glove.glove_cython.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, 
/*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if 
/* Remainder of __pyx_tp_traverse_Enum (signature on the previous source
 * line): visit the single owned reference `p->name` for the GC. */
(p->name) {
    e = (*v)(p->name, a); if (e) return e;
  }
  return 0;
}
/* tp_clear slot for Enum: break reference cycles by swapping `name` to
 * Py_None *before* releasing the old reference (re-entrancy safe order). */
static int __pyx_tp_clear_Enum(PyObject *o) {
  PyObject* tmp;
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  tmp = ((PyObject*)p->name);
  p->name = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  return 0;
}
/* Method table for Enum: only the pickle-support entry points. */
static PyMethodDef __pyx_methods_Enum[] = {
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};
/* PyTypeObject for the internal memoryview Enum sentinel type. Slot layout
 * varies with the CPython version, hence the PY_VERSION_HEX guards. */
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
  PyVarObject_HEAD_INIT(0, 0)
  "glove.glove_cython.Enum", /*tp_name*/
  sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_Enum, /*tp_dealloc*/
  #if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
  #endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  __pyx_MemviewEnum___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  /* HAVE_GC: instances participate in cyclic GC via the traverse/clear slots above */
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_Enum, /*tp_traverse*/
  __pyx_tp_clear_Enum, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_Enum, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  __pyx_MemviewEnum___init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_Enum, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0,
/*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) 
/* Body of __pyx_tp_clear_memoryview (signature ends the previous source
 * line): tp_clear slot — break GC cycles by resetting each owned PyObject
 * field to Py_None before dropping the old reference, then release the
 * buffer-view owner. */
{
  PyObject* tmp;
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp);
  /* view.obj holds the exporter of the underlying Py_buffer */
  Py_CLEAR(p->view.obj);
  return 0;
}
/* sq_item slot: implement m[i] for integer i by boxing the index and
 * delegating to the mapping-protocol subscript. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyInt_FromSsize_t(i);
  if(!x) return 0;
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  Py_DECREF(x);
  return r;
}
/* mp_ass_subscript slot: m[i] = v delegates to __setitem__; deletion
 * (v == NULL) is explicitly unsupported. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_memoryview___setitem__(o, i, v);
  }
  else {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
}
/* One-line getter thunks: each forwards a memoryview property read to the
 * corresponding Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
/* `itemsize` getter — body continues on the next source line. */
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return
__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ 
}; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "glove.glove_cython.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, 
/*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); 
p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "glove.glove_cython._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 
__pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_glove_cython(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_glove_cython}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "glove_cython", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, 
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, 
sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s__19, __pyx_k__19, sizeof(__pyx_k__19), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_col, __pyx_k_col, sizeof(__pyx_k_col), 0, 0, 1, 1}, {&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dim, __pyx_k_dim, sizeof(__pyx_k_dim), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 
0, 1, 1}, {&__pyx_n_s_entry_weight, __pyx_k_entry_weight, sizeof(__pyx_k_entry_weight), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_epoch, __pyx_k_epoch, sizeof(__pyx_k_epoch), 0, 0, 1, 1}, {&__pyx_n_s_epochs, __pyx_k_epochs, sizeof(__pyx_k_epochs), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_fit_vectors, __pyx_k_fit_vectors, sizeof(__pyx_k_fit_vectors), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_n_s_glove_glove_cython, __pyx_k_glove_glove_cython, sizeof(__pyx_k_glove_glove_cython), 0, 0, 1, 1}, {&__pyx_kp_s_glove_glove_cython_pyx, __pyx_k_glove_glove_cython_pyx, sizeof(__pyx_k_glove_glove_cython_pyx), 0, 0, 1, 0}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_gradient, __pyx_k_gradient, sizeof(__pyx_k_gradient), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_initial_learning_rate, __pyx_k_initial_learning_rate, sizeof(__pyx_k_initial_learning_rate), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_learning_rate, __pyx_k_learning_rate, sizeof(__pyx_k_learning_rate), 0, 0, 1, 1}, 
{&__pyx_n_s_loss, __pyx_k_loss, sizeof(__pyx_k_loss), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_max_count, __pyx_k_max_count, sizeof(__pyx_k_max_count), 0, 0, 1, 1}, {&__pyx_n_s_max_loss, __pyx_k_max_loss, sizeof(__pyx_k_max_loss), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_n_s_no_cooccurrences, __pyx_k_no_cooccurrences, sizeof(__pyx_k_no_cooccurrences), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_no_threads, __pyx_k_no_threads, sizeof(__pyx_k_no_threads), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_paragraphvec, __pyx_k_paragraphvec, sizeof(__pyx_k_paragraphvec), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_prediction, __pyx_k_prediction, sizeof(__pyx_k_prediction), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 
0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_row, __pyx_k_row, sizeof(__pyx_k_row), 0, 0, 1, 1}, {&__pyx_n_s_scipy_sparse, __pyx_k_scipy_sparse, sizeof(__pyx_k_scipy_sparse), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_shuffle_index, __pyx_k_shuffle_index, sizeof(__pyx_k_shuffle_index), 0, 0, 1, 1}, {&__pyx_n_s_shuffle_indices, __pyx_k_shuffle_indices, sizeof(__pyx_k_shuffle_indices), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_sp, __pyx_k_sp, sizeof(__pyx_k_sp), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, 
/* NOTE(review): auto-generated Cython output (from glove/glove_cython.pyx).
 * Do not hand-edit; regenerate from the .pyx instead.  This span holds the
 * tail of the interned-string table (terminated by the all-zero sentinel
 * entry) and __Pyx_InitCachedBuiltins(), which looks up and caches the
 * Python builtins this module uses (range, ValueError, MemoryError,
 * enumerate, TypeError, Ellipsis, id, IndexError).  __PYX_ERR(f, l, label)
 * records the originating .pyx / View.MemoryView source line, then jumps
 * to the error label. */
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_sum_gradients, __pyx_k_sum_gradients, sizeof(__pyx_k_sum_gradients), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_transform_paragraph, __pyx_k_transform_paragraph, sizeof(__pyx_k_transform_paragraph), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_word_a, __pyx_k_word_a, sizeof(__pyx_k_word_a), 0, 0, 1, 1}, {&__pyx_n_s_word_b, __pyx_k_word_b, sizeof(__pyx_k_word_b), 0, 0, 1, 1}, {&__pyx_n_s_wordbias, __pyx_k_wordbias, sizeof(__pyx_k_wordbias), 0, 0, 1, 1}, {&__pyx_n_s_wordbias_sum_gradients, __pyx_k_wordbias_sum_gradients, sizeof(__pyx_k_wordbias_sum_gradients), 0, 0, 1, 1}, {&__pyx_n_s_wordvec, __pyx_k_wordvec, sizeof(__pyx_k_wordvec), 0, 0, 1, 1}, {&__pyx_n_s_wordvec_sum_gradients, __pyx_k_wordvec_sum_gradients, sizeof(__pyx_k_wordvec_sum_gradients), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 70, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = 
/* End of __Pyx_InitCachedBuiltins (returns 0 on success, -1 on failure);
 * then __Pyx_InitCachedConstants() begins, pre-building the constant
 * argument tuples used by raise statements in the memoryview support code. */
__Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if 
/* NOTE(review): auto-generated Cython output - do not hand-edit; change
 * glove/glove_cython.pyx and regenerate.  Body of
 * __Pyx_InitCachedConstants(): each __pyx_tuple__N is a pre-packed
 * 1-tuple holding an exception-message string for a View.MemoryView raise
 * site (the embedded comments quote the originating source lines);
 * __Pyx_GOTREF/__Pyx_GIVEREF are refnanny bookkeeping, no-ops in release
 * builds.  Later entries build the argument-name tuples and code objects
 * for the two public .pyx functions, fit_vectors (glove_cython.pyx:20)
 * and transform_paragraph (glove_cython.pyx:111), plus the Enum-name
 * tuples for the memoryview access-mode singletons. */
(unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":495 * result = 
struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default 
__reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * 
def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ __pyx_tuple__20 = PyTuple_Pack(26, __pyx_n_s_wordvec, __pyx_n_s_wordvec_sum_gradients, __pyx_n_s_wordbias, __pyx_n_s_wordbias_sum_gradients, __pyx_n_s_row, __pyx_n_s_col, __pyx_n_s_counts, __pyx_n_s_shuffle_indices, __pyx_n_s_initial_learning_rate, __pyx_n_s_max_count, __pyx_n_s_alpha, __pyx_n_s_max_loss, __pyx_n_s_no_threads, __pyx_n_s_dim, __pyx_n_s_no_cooccurrences, __pyx_n_s_word_a, __pyx_n_s_word_b, __pyx_n_s_count, __pyx_n_s_learning_rate, __pyx_n_s_gradient, __pyx_n_s_prediction, __pyx_n_s_entry_weight, __pyx_n_s_loss, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_shuffle_index); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); __pyx_codeobj__21 = (PyObject*)__Pyx_PyCode_New(13, 0, 26, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__20, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_glove_glove_cython_pyx, __pyx_n_s_fit_vectors, 20, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__21)) __PYX_ERR(0, 20, __pyx_L1_error) /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ __pyx_tuple__22 = PyTuple_Pack(25, __pyx_n_s_wordvec, __pyx_n_s_wordbias, __pyx_n_s_paragraphvec, __pyx_n_s_sum_gradients, __pyx_n_s_row, __pyx_n_s_counts, __pyx_n_s_shuffle_indices, __pyx_n_s_initial_learning_rate, __pyx_n_s_max_count, __pyx_n_s_alpha, __pyx_n_s_epochs, 
__pyx_n_s_dim, __pyx_n_s_no_cooccurrences, __pyx_n_s_word_b, __pyx_n_s_word_a, __pyx_n_s_count, __pyx_n_s_prediction, __pyx_n_s_entry_weight, __pyx_n_s_loss, __pyx_n_s_gradient, __pyx_n_s_epoch, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_shuffle_index, __pyx_n_s_learning_rate); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); __pyx_codeobj__23 = (PyObject*)__Pyx_PyCode_New(11, 0, 25, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_glove_glove_cython_pyx, __pyx_n_s_transform_paragraph, 111, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__23)) __PYX_ERR(0, 111, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); 
/* End of __Pyx_InitCachedConstants below (returns 0/-1 with refnanny
 * teardown); then __Pyx_InitGlobals() begins, which initializes Python
 * threading, interns the string table, and creates the cached small ints
 * (0, 1, -1, and the pickle-protocol checksum 184977713). */
__Pyx_GIVEREF(__pyx_tuple__26); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__29 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 
/* NOTE(review): auto-generated Cython module-init helpers - do not
 * hand-edit; regenerate from glove/glove_cython.pyx.  Tail of
 * __Pyx_InitGlobals (remaining cached small ints), then the
 * __Pyx_modinit_* family: global_init sets the five memoryview Enum
 * singletons (generic/strided/indirect/contiguous/indirect_contiguous)
 * to Py_None placeholders, variable/function export are empty stubs, and
 * type_init_code readies the array / MemviewEnum / memoryview /
 * _memoryviewslice extension types and wires up their C vtables
 * (tp_print is zeroed only for CPython < 3.8.0b1, where the slot still
 * exists). */
1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject 
*))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { 
/* End of type_init (_memoryviewslice inherits and partially overrides
 * memoryview's vtable); the type/variable/function import helpers are
 * empty stubs.  The PyMODINIT_FUNC macro block then selects the correct
 * entry-point signature (py2 void init / py3 PyObject* PyInit / PEP 489
 * multi-phase), and __Pyx_check_single_interpreter begins. */
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initglove_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initglove_cython(void) #else __Pyx_PyMODINIT_FUNC PyInit_glove_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_glove_cython(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = 
/* NOTE(review): auto-generated PEP 489 multi-phase-init support - do not
 * hand-edit; regenerate from glove/glove_cython.pyx.  End of
 * __Pyx_check_single_interpreter (this module refuses to load into more
 * than one interpreter per process; uses PyInterpreterState_GetID on
 * 3.7+, raw interpreter pointers otherwise), then
 * __Pyx_copy_spec_to_module (copies loader/origin/parent/
 * submodule_search_locations from the import spec into the module dict,
 * silently skipping missing attributes) and __pyx_pymod_create (the
 * Py_mod_create slot), followed by the start of
 * __pyx_pymod_exec_glove_cython, the Py_mod_exec body that runs the
 * actual module code. */
-1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, 
"submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_glove_cython(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; static PyThread_type_lock __pyx_t_3[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'glove_cython' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_glove_cython(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef 
__Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("glove_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_glove__glove_cython) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "glove.glove_cython")) { if (unlikely(PyDict_SetItemString(modules, "glove.glove_cython", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "glove/glove_cython.pyx":4 * #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False * * import numpy as np # <<<<<<<<<<<<<< * import scipy.sparse as sp * import collections */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "glove/glove_cython.pyx":5 * * import numpy as np 
* import scipy.sparse as sp # <<<<<<<<<<<<<< * import collections * from cython.parallel import parallel, prange */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s__19); __Pyx_GIVEREF(__pyx_n_s__19); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s__19); __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_sparse, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_sp, __pyx_t_2) < 0) __PYX_ERR(0, 5, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":6 * import numpy as np * import scipy.sparse as sp * import collections # <<<<<<<<<<<<<< * from cython.parallel import parallel, prange * */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_collections, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_collections, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5glove_12glove_cython_1fit_vectors, NULL, __pyx_n_s_glove_glove_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_fit_vectors, __pyx_t_2) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5glove_12glove_cython_3transform_paragraph, NULL, __pyx_n_s_glove_glove_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_transform_paragraph, __pyx_t_2) < 0) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":1 * #!python # <<<<<<<<<<<<<< * #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False * */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 287, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef 
PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_3[0] = PyThread_allocate_lock(); __pyx_t_3[1] = PyThread_allocate_lock(); __pyx_t_3[2] = PyThread_allocate_lock(); __pyx_t_3[3] = PyThread_allocate_lock(); __pyx_t_3[4] = PyThread_allocate_lock(); __pyx_t_3[5] = PyThread_allocate_lock(); __pyx_t_3[6] = PyThread_allocate_lock(); __pyx_t_3[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_3, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_2 = 
PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init glove.glove_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init glove.glove_cython"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: 
memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = 
__pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject 
**type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && 
PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject 
**args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. 
*/ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = 
PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { 
PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return 
PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) 
PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return PyUnicode_FromUnicode(NULL, 0); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? 
__PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { 
/* NOTE(review): auto-generated Cython runtime helpers -- comments added only; code tokens unchanged. */
/* (continuation of __Pyx_RaiseTooManyValuesError: raise ValueError naming the expected count) */
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}

/* RaiseNeedMoreValuesToUnpack */
/* Raise ValueError when an unpack target received fewer values than needed. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}

/* RaiseNoneIterError */
/* Raise TypeError for an attempt to iterate over None. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}

/* ExtTypeTest */
/* Return 1 if obj is an instance of type; otherwise set an exception and return 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}

/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk the thread state's exc_info stack to the innermost item holding a live exception. */
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif

/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Copy the currently handled exception into *type/*value/*tb (each Py_XINCREF'd). */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
#else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
#endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Install type/value/tb as the currently handled exception (continues on next line). */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type =
exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; 
/* NOTE(review): auto-generated Cython runtime helpers -- comments added only; code tokens unchanged. */
/* (continuation of __Pyx__GetException: drop the previously handled exception refs) */
#endif
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}

/* SwapException */
#if CYTHON_FAST_THREAD_STATE
/* Exchange the currently handled exception with *type/*value/*tb in place
   (references move in both directions; no incref/decref needed). */
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = *type;
    exc_info->exc_value = *value;
    exc_info->exc_traceback = *tb;
#else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = *type;
    tstate->exc_value = *value;
    tstate->exc_traceback = *tb;
#endif
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#else
/* Portable fallback using the public PyErr_{Get,Set}ExcInfo API. */
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
    PyErr_SetExcInfo(*type, *value, *tb);
    *type = tmp_type;
    *value = tmp_value;
    *tb = tmp_tb;
}
#endif

/* Import */
/* __import__ wrapper: import `name` with the given from-list and relative-import level. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
#if PY_MAJOR_VERSION < 3
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
#endif
    if (from_list)
        list = from_list;
    else {
        /* __import__ requires a (possibly empty) from-list object. */
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
#if PY_MAJOR_VERSION >= 3
        /* level == -1: handled specially (continues on the next source line). */
        if (level == -1) {
            if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
                module =
PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = 
intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) 
/* NOTE(review): auto-generated Cython runtime helpers -- comments added only; code tokens unchanged. */
/* (continuation of __Pyx_PyInt_AddObjC: float fast path result, then generic fallback) */
    return PyFloat_FromDouble(result);
    }
    return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif

/* None */
/* Raise UnboundLocalError for a local variable read before assignment. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}

/* ImportFrom */
/* Fetch attribute `name` from `module`; map a missing attribute to ImportError. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
            "cannot import name %.230s", PyString_AS_STRING(name));
#else
            "cannot import name %S", name);
#endif
    }
    return value;
}

/* HasAttr */
/* hasattr() equivalent: 1 if attribute exists, 0 if not, -1 on non-string name. */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *r;
    if (unlikely(!__Pyx_PyBaseString_Check(n))) {
        PyErr_SetString(PyExc_TypeError,
                        "hasattr(): attribute name must be string");
        return -1;
    }
    r = __Pyx_GetAttr(o, n);
    if (unlikely(!r)) {
        /* Any lookup failure counts as "no attribute". */
        PyErr_Clear();
        return 0;
    } else {
        Py_DECREF(r);
        return 1;
    }
}

/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Set an AttributeError naming both the type and the missing attribute. */
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
                 "'%.50s' object has no attribute '%U'",
                 tp->tp_name, attr_name);
#else
                 "'%.50s' object has no attribute '%.400s'",
                 tp->tp_name, PyString_AS_STRING(attr_name));
#endif
    return NULL;
}
/* Attribute lookup fast path for types asserted to have no instance dict. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
    PyObject *descr;
    PyTypeObject *tp = Py_TYPE(obj);
    if (unlikely(!PyString_Check(attr_name))) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    assert(!tp->tp_dictoffset);
    descr = _PyType_Lookup(tp, attr_name);
    if (unlikely(!descr)) {
        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
    }
    Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
    if (likely(PyType_HasFeature(Py_TYPE(descr),
/* NOTE(review): auto-generated Cython runtime helpers -- comments added only; code tokens unchanged. */
/* (continuation of __Pyx_PyObject_GenericGetAttrNoDict: invoke descriptor protocol) */
    Py_TPFLAGS_HAVE_CLASS)))
#endif
    {
        descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
        if (unlikely(f)) {
            PyObject *res = f(descr, obj, (PyObject *)tp);
            Py_DECREF(descr);
            return res;
        }
    }
    return descr;
}
#endif

/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Generic getattr that defers to CPython whenever the type has an instance dict. */
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
    if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif

/* SetVTable */
/* Store the C vtable pointer into `dict` under the __pyx_n_s_pyx_vtable key,
   wrapped in a PyCapsule (PyCObject before 2.7). Returns 0 on success, -1 on error. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}

/* PyObjectGetAttrStrNoError */
/* Clear the pending error only when it is an AttributeError. */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        __Pyx_PyErr_Clear();
}
/* getattr variant that returns NULL without leaving an AttributeError set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
    PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
    /* CPython >= 3.7: use the suppress-error flavor of the generic getattr directly. */
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
        return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
    }
#endif
    result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
    if (unlikely(!result)) {
        __Pyx_PyObject_GetAttrStr_ClearAttributeError();
    }
    return result;
}

/* SetupReduce */
/* Compare meth's name attribute (key __pyx_n_s_name_2 -- presumably __name__;
   confirm against the string table) against `name`; 1/0 result, errors mapped below. */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
    int ret;
    PyObject *name_attr;
    name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
    if (likely(name_attr)) {
        ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
    } else {
        ret = -1;
    }
    if (unlikely(ret < 0)) {
PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if 
(!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
/* NOTE(review): auto-generated Cython runtime helpers -- comments added only; code tokens unchanged. */
/* (continuation of __Pyx_CLineForTraceback: interpret the cline_in_traceback flag) */
                Py_False : Py_True;
            Py_DECREF(use_cline_obj);
        } else {
            PyErr_Clear();
            use_cline = NULL;
        }
    }
    if (!use_cline) {
        /* Attribute missing: default to suppressing C lines and cache that choice. */
        c_line = 0;
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    } else if (use_cline == Py_False ||
               (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif

/* CodeObjectCache */
/* Binary search: index of code_line in entries, or the insertion point if absent. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up a cached code object by line number; returns a new reference or NULL. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) ||
        unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) a code object in the sorted cache; capacity grows in steps of 64. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* First insertion: allocate the initial 64-entry table. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos =
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ 
/* NOTE(review): auto-generated Cython runtime helpers -- comments added only; code tokens unchanged. */
/* (continuation of __Pyx_CreateCodeObjectForTraceback: remaining PyCode_New arguments) */
        py_srcfile,        /*PyObject *filename,*/
        py_funcname,       /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a synthetic frame (funcname/filename/line) to the current traceback. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    /* C lines are cached under negated keys so they cannot collide with Python lines. */
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,            /*PyThreadState *tstate,*/
        py_code,           /*PyCodeObject *code,*/
        __pyx_d,           /*PyObject *globals,*/
        0                  /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
/* Python 2 buffer-protocol entry points: dispatch to this module's array/memoryview types. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    if (PyObject_CheckBuffer(obj))
        return PyObject_GetBuffer(obj, view, flags);
    if (__Pyx_TypeCheck(obj, __pyx_array_type))
        return __pyx_array_getbuffer(obj, view, flags);
    if (__Pyx_TypeCheck(obj, __pyx_memoryview_type))
        return __pyx_memoryview_getbuffer(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *obj = view->obj;
    if (!obj)
        return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    if ((0)) {}
    view->obj = NULL;
    Py_DECREF(obj);
}
#endif

/* MemviewSliceIsContig */
/* Return 1 if mvs is contiguous over ndim dimensions in the given order
   ('F' = Fortran order; any other value is treated as C order). */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) {
    int i, index, step, start;
    Py_ssize_t itemsize = mvs.memview->view.itemsize;
    if (order == 'F') {
step = 1;                       /* ascending dim scan */
        start = 0;
    } else {
        step = -1;                      /* descending dim scan */
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        /* contiguity fails on any indirect dim (suboffset >= 0) or on a
           stride that is not exactly the running itemsize so far */
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        itemsize *= mvs.shape[index];
    }
    return 1;
}

/* OverlappingSlices */
/* Compute the [start, end) byte range spanned by a memoryview slice.
   Negative strides move `start` down instead of `end` up; a zero-extent
   dimension collapses the whole range to empty.  `itemsize` is added at
   the end so `end` is one-past the last byte. */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;   /* empty slice: empty range */
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    *out_end = end + itemsize;
}
/* Do the byte ranges of two slices intersect?  (Half-open interval
   overlap test on the extents computed above.) */
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}

/* Capsule */
/* Wrap a raw C pointer in a capsule (PyCObject on pre-2.7 Pythons). */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}

/* IsLittleEndian */
/* Runtime endianness probe: store a known 32-bit pattern and look at
   which byte lands first in memory. */
static CYTHON_INLINE int
__Pyx_Is_Little_Endian(void)
{
  union {
    uint32_t u32;
    uint8_t u8[4];
  } S;
  S.u32 = 0x01020304;
  return S.u8[0] == 4;    /* low-order byte first => little-endian */
}

/* BufferFormatCheck */
/* Initialize a buffer-format parsing context over `type` (the expected
   dtype); `stack` holds one frame per level of struct nesting. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
  stack[0].field = &ctx->root;
  stack[0].parent_offset = 0;
  ctx->root.type = type;
  ctx->root.name = "buffer dtype";
  ctx->root.offset = 0;
  ctx->head = stack;
  ctx->head->field = &ctx->root;
  ctx->fmt_offset = 0;
  ctx->head->parent_offset = 0;
  ctx->new_packmode = '@';
  ctx->enc_packmode = '@';
  ctx->new_count = 1;
  ctx->enc_count = 0;
  ctx->enc_type =
0;                              /* (continuation) no type chunk pending */
  ctx->is_complex = 0;
  ctx->is_valid_array = 0;
  ctx->struct_alignment = 0;
  /* Descend through wrapper structs so `head` points at the first leaf
     field of the expected dtype. */
  while (type->typegroup == 'S') {
    ++ctx->head;
    ctx->head->field = type->fields;
    ctx->head->parent_offset = 0;
    type = type->fields->type;
  }
}

/* Parse a decimal repeat count at *ts, advancing *ts past the digits.
   Returns -1 (without setting a Python exception) if no digit present. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
      return -1;
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Like ParseNumber, but a missing number is an error: sets ValueError
   and still returns -1. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Report an unexpected struct-format character as a ValueError. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
  PyErr_Format(PyExc_ValueError,
               "Unexpected format string character: '%c'", ch);
}
/* Human-readable name of a struct-format type code, for error messages. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
  switch (ch) {
    case '?': return "'bool'";
    case 'c': return "'char'";
    case 'b': return "'signed char'";
    case 'B': return "'unsigned char'";
    case 'h': return "'short'";
    case 'H': return "'unsigned short'";
    case 'i': return "'int'";
    case 'I': return "'unsigned int'";
    case 'l': return "'long'";
    case 'L': return "'unsigned long'";
    case 'q': return "'long long'";
    case 'Q': return "'unsigned long long'";
    case 'f': return (is_complex ? "'complex float'" : "'float'");
    case 'd': return (is_complex ? "'complex double'" : "'double'");
    case 'g': return (is_complex ?
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/
/* "pad" probes: sizeof(__Pyx_pad_T) - sizeof(T) is the trailing padding
   the compiler inserts after a T when a char follows it. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing struct padding required after a value of the given type code. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* Map a type code onto a coarse comparison group; entries in group 'H'
   are later matched by size only (see __Pyx_BufFmt_ProcessTypeChunk). */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
  switch (ch) {
    case 'c':
        return 'H';
    case 'b': case 'h': case 'i':
    case 'l': case 'q': case 's': case 'p':
        return 'I';
    case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
        return 'U';
    case 'f': case 'd': case 'g':
        return (is_complex ?
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    /* Parse an array-dimension spec "(d1,d2,...)" in a buffer format
       string, checking each dimension against the expected dtype's
       arraysize.  On success advances *tsp past the ')' and returns
       Py_None (borrowed sentinel); on failure sets a ValueError and
       returns NULL. */
    const char *ts = *tsp;
    int i = 0, number, ndim;
    ++ts;                               /* skip the opening '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    ndim = ctx->head->field->type->ndim;
    while (*ts && *ts != ')') {
        switch (*ts) {
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                /* BUGFIX: the original did a bare `continue` here without
                   advancing `ts`, so any whitespace inside the dimension
                   list re-tested the same character forever (infinite
                   loop).  Skip the whitespace character first. */
                ++ts;
                continue;
            default:  break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        /* Each parsed dimension must match the expected arraysize. */
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;
}
/* Walk a complete buffer format string, validating it against the
   expected dtype recorded in `ctx`.  (Continues past this view.) */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0;
  while (1) {
    switch(*ts) {
      case 0:
        /* end of format string: flush the pending chunk and require all
           expected fields to have been consumed */
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case '\r':
      case '\n':
        ++ts;
        break;
      case '<':
        if (!__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError,
                          "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        if (__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError,
                          "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': 
++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same 
dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct 
__pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: 
result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned 
PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, 
ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { 
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: 
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= 
sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * 
sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch 
(Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) 
(((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } 
raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * 
PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, 
sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert 
to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return 
NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if 
(PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
Square.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/Square.c" #else static int nn_(Square_updateOutput)(lua_State *L) { THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id)); THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id)); THTensor_(resizeAs)(output, input); if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output)) { TH_TENSOR_APPLY2(real, output, real, input, \ *output_data = (*input_data) * (*input_data);); } else { real* output_data = THTensor_(data)(output); real* input_data = THTensor_(data)(input); long i; #pragma omp parallel for private(i) for(i = 0; i < THTensor_(nElement)(input); i++) output_data[i] = input_data[i]*input_data[i]; } return 1; } static int nn_(Square_updateGradInput)(lua_State *L) { THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id)); THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id)); THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id)); THTensor_(resizeAs)(gradInput, input); if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput)) { TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, \ *gradInput_data = (*gradOutput_data) * (*input_data);); } else { real* gradOutput_data = THTensor_(data)(gradOutput); real* gradInput_data = THTensor_(data)(gradInput); real* input_data = THTensor_(data)(input); long i; #pragma omp parallel for private(i) for(i = 0; i < THTensor_(nElement)(gradInput); i++) gradInput_data[i] = 2.0 * gradOutput_data[i] * input_data[i]; } return 1; } static const struct luaL_Reg nn_(Square__) [] = { {"Square_updateOutput", nn_(Square_updateOutput)}, {"Square_updateGradInput", nn_(Square_updateGradInput)}, {NULL, NULL} }; static void nn_(Square_init)(lua_State *L) { luaT_pushmetaclass(L, torch_(Tensor_id)); luaT_registeratname(L, nn_(Square__), "nn"); lua_pop(L,1); } #endif
convolution_winograd_transform_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6x6, 3x3) input transform for 3x3 stride-1 convolution,
// fp16 storage + fp16 arithmetic (fp16sa), pack-8 layout (8 fp16 lanes per
// element, one float16x8_t per packed pixel — visible in the *8 strides below).
// Each 8x8 input tile (6x6 output tile + 2-pixel halo) is multiplied by the
// transform matrix B^T on both sides: bottom_blob_tm = B^T * d * B, computed
// as two 1-D passes (rows, then columns) through the stack buffer `tmp`.
// Tiles are assumed to partition the input exactly ((w-2), (h-2) divisible
// by 6 after the caller's padding — TODO confirm against caller).
static void conv3x3s1_winograd64_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // number of 6x6 output tiles in each direction (2 = 3x3 kernel halo)
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    // one channel per OpenMP task; channels are independent
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // per-thread scratch: tmp[row][col][lane] holds B^T * d (row pass result)
        __fp16 tmp[8][8][8];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // top-left pixel of this tile (tiles overlap by 2 pixels)
                const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;

                // pass 1: transform each of the 8 tile rows -> tmp (transposed)
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _r00 = vld1q_f16(r0);
                    float16x8_t _r01 = vld1q_f16(r0 + 8);
                    float16x8_t _r02 = vld1q_f16(r0 + 16);
                    float16x8_t _r03 = vld1q_f16(r0 + 24);
                    float16x8_t _r04 = vld1q_f16(r0 + 32);
                    float16x8_t _r05 = vld1q_f16(r0 + 40);
                    float16x8_t _r06 = vld1q_f16(r0 + 48);
                    float16x8_t _r07 = vld1q_f16(r0 + 56);

                    // rows 0 and 7 of itm
                    float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
                    float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);

                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[7][m], _tmp7m);

                    // shared subexpressions for rows 1/2 (sum/difference pair)
                    float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
                    float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);

                    float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
                    float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);

                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[2][m], _tmp2m);

                    // rows 3/4
                    float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
                    float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
                    float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);

                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[4][m], _tmp4m);

                    // rows 5/6
                    float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
                    float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
                    float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);

                    vst1q_f16(tmp[5][m], _tmp5m);
                    vst1q_f16(tmp[6][m], _tmp6m);

                    r0 += w * 8; // next input row (pack-8 stride)
                }

                // destination: 64 frequency planes, each holding one packed
                // element per tile (stride tiles*8 between planes)
                __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8;
                __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
                __fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
                __fp16* r0_tm_7 = r0_tm_0 + tiles * 56;

                // pass 2: same 1-D transform applied to the columns in tmp
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                    float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                    float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
                    float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);

                    float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
                    float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);

                    float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
                    float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);

                    float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
                    float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);

                    float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
                    float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
                    float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);

                    vst1q_f16(r0_tm_0, _r0tm0);
                    vst1q_f16(r0_tm_1, _r0tm1);
                    vst1q_f16(r0_tm_2, _r0tm2);
                    vst1q_f16(r0_tm_3, _r0tm3);
                    vst1q_f16(r0_tm_4, _r0tm4);
                    vst1q_f16(r0_tm_5, _r0tm5);
                    vst1q_f16(r0_tm_6, _r0tm6);
                    vst1q_f16(r0_tm_7, _r0tm7);

                    // advance all 8 plane pointers past one row-group of planes
                    r0_tm_0 += tiles * 64;
                    r0_tm_1 += tiles * 64;
                    r0_tm_2 += tiles * 64;
                    r0_tm_3 += tiles * 64;
                    r0_tm_4 += tiles * 64;
                    r0_tm_5 += tiles * 64;
                    r0_tm_6 += tiles * 64;
                    r0_tm_7 += tiles * 64;
                }
            }
        }
    }
}

// Winograd F(6x6, 3x3) output transform, fp16sa, pack-8 layout.
// Inverse of the input transform: top_blob = A^T * m * A per tile, where m is
// the 8x8 per-tile block gathered from the 64 frequency planes of top_blob_tm.
// Per-channel bias (8 fp16 lanes, or zero when bias is empty) is added to
// every output element in the second pass.
static void conv3x3s1_winograd64_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const __fp16* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    // one output channel per OpenMP task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // per-channel bias vector; zero when no bias tensor was provided
        float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f);

        // per-thread scratch: 6x8 intermediate after the row pass
        __fp16 tmp[6][8][8];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // this tile's packed element in each of the 64 frequency planes
                const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8;
                const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
                const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
                const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
                const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
                const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;
                const __fp16* output0_tm_6 = output0_tm_0 + tiles * 48;
                const __fp16* output0_tm_7 = output0_tm_0 + tiles * 56;

                // destination: top-left pixel of this 6x6 output tile
                __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 8;

                // pass 1: collapse the 8 tile rows down to 6 (otm applied rowwise)
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
                    float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
                    float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
                    float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
                    float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
                    float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);
                    float16x8_t _out0tm6 = vld1q_f16(output0_tm_6);
                    float16x8_t _out0tm7 = vld1q_f16(output0_tm_7);

                    // sum/difference pairs shared between even/odd output rows
                    float16x8_t _tmp024a = vaddq_f16(_out0tm1, _out0tm2);
                    float16x8_t _tmp135a = vsubq_f16(_out0tm1, _out0tm2);

                    float16x8_t _tmp024b = vaddq_f16(_out0tm3, _out0tm4);
                    float16x8_t _tmp135b = vsubq_f16(_out0tm3, _out0tm4);

                    float16x8_t _tmp024c = vaddq_f16(_out0tm5, _out0tm6);
                    float16x8_t _tmp135c = vsubq_f16(_out0tm5, _out0tm6);

                    float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f));
                    float16x8_t _tmp2m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float16x8_t _tmp4m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);

                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[2][m], _tmp2m);
                    vst1q_f16(tmp[4][m], _tmp4m);

                    float16x8_t _tmp1m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float16x8_t _tmp3m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float16x8_t _tmp5m = vaddq_f16(vaddq_f16(_out0tm7, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f));

                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[5][m], _tmp5m);

                    // next row-group of frequency planes
                    output0_tm_0 += tiles * 64;
                    output0_tm_1 += tiles * 64;
                    output0_tm_2 += tiles * 64;
                    output0_tm_3 += tiles * 64;
                    output0_tm_4 += tiles * 64;
                    output0_tm_5 += tiles * 64;
                    output0_tm_6 += tiles * 64;
                    output0_tm_7 += tiles * 64;
                }

                // pass 2: collapse 8 columns to 6 and add bias -> final pixels
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                    float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                    float16x8_t _tmp024a = vaddq_f16(_tmp01, _tmp02);
                    float16x8_t _tmp135a = vsubq_f16(_tmp01, _tmp02);

                    float16x8_t _tmp024b = vaddq_f16(_tmp03, _tmp04);
                    float16x8_t _tmp135b = vsubq_f16(_tmp03, _tmp04);

                    float16x8_t _tmp024c = vaddq_f16(_tmp05, _tmp06);
                    float16x8_t _tmp135c = vsubq_f16(_tmp05, _tmp06);

                    float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f)));
                    float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float16x8_t _out04 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));

                    vst1q_f16(output0, _out00);
                    vst1q_f16(output0 + 16, _out02);
                    vst1q_f16(output0 + 32, _out04);

                    float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float16x8_t _out05 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp07, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f)));

                    vst1q_f16(output0 + 8, _out01);
                    vst1q_f16(output0 + 24, _out03);
                    vst1q_f16(output0 + 40, _out05);

                    output0 += outw * 8; // next output row (pack-8 stride)
                }
            }
        }
    }
}

static void
conv3x3s1_winograd42_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles; // const float itm[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[6][6][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f); float16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f); float16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f); float16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); float16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); float16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); vst1q_f16(tmp[3][m], 
_tmp3m); vst1q_f16(tmp[4][m], _tmp4m); vst1q_f16(tmp[5][m], _tmp5m); r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _r0tm0 = vfmsq_n_f16(vfmaq_n_f16(_tmp04, _tmp00, 4.f), _tmp02, 5.f); float16x8_t _r0tm1 = vfmsq_n_f16(vaddq_f16(_tmp04, _tmp03), vaddq_f16(_tmp01, _tmp02), 4.f); float16x8_t _r0tm2 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp03), vsubq_f16(_tmp01, _tmp02), 4.f); float16x8_t _r0tm3 = vfmsq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f); float16x8_t _r0tm4 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f); float16x8_t _r0tm5 = vfmsq_n_f16(vfmaq_n_f16(_tmp05, _tmp01, 4.f), _tmp03, 5.f); vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } static void conv3x3s1_winograd42_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; const __fp16* biasptr = bias; // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // 
{0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f); __fp16 tmp[4][6][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40; __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { float16x8_t _out0tm0 = vld1q_f16(output0_tm_0); float16x8_t _out0tm1 = vld1q_f16(output0_tm_1); float16x8_t _out0tm2 = vld1q_f16(output0_tm_2); float16x8_t _out0tm3 = vld1q_f16(output0_tm_3); float16x8_t _out0tm4 = vld1q_f16(output0_tm_4); float16x8_t _out0tm5 = vld1q_f16(output0_tm_5); float16x8_t _tmp02a = vaddq_f16(_out0tm1, _out0tm2); float16x8_t _tmp13a = vsubq_f16(_out0tm1, _out0tm2); float16x8_t _tmp02b = vaddq_f16(_out0tm3, _out0tm4); float16x8_t _tmp13b = vsubq_f16(_out0tm3, _out0tm4); float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp02a), _tmp02b); float16x8_t _tmp1m = vfmaq_n_f16(_tmp13a, _tmp13b, 2.f); float16x8_t _tmp2m = vfmaq_n_f16(_tmp02a, _tmp02b, 4.f); float16x8_t _tmp3m = vfmaq_n_f16(vaddq_f16(_out0tm5, _tmp13a), _tmp13b, 8.f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); vst1q_f16(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 48; output0_tm_1 += tiles * 48; output0_tm_2 += tiles * 48; output0_tm_3 += tiles * 48; output0_tm_4 += 
tiles * 48; output0_tm_5 += tiles * 48; } for (int m = 0; m < 4; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp02a = vaddq_f16(_tmp01, _tmp02); float16x8_t _tmp13a = vsubq_f16(_tmp01, _tmp02); float16x8_t _tmp02b = vaddq_f16(_tmp03, _tmp04); float16x8_t _tmp13b = vsubq_f16(_tmp03, _tmp04); float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp02a), _tmp02b)); float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp13a, _tmp13b, 2.f)); float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp02a, _tmp02b, 4.f)); float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vaddq_f16(_tmp05, _tmp13a), _tmp13b, 8.f)); vst1q_f16(output0, _out00); vst1q_f16(output0 + 8, _out01); vst1q_f16(output0 + 16, _out02); vst1q_f16(output0 + 24, _out03); output0 += outw * 8; } } } } }
ompbarrier.c
#include <stdio.h>
#include <stdlib.h>

/*
 * Demonstrates OpenMP barrier synchronization: every thread runs the same
 * fixed number of iterations, and a barrier before and after the printf
 * keeps all threads advancing through the loop in lock-step.
 */
int main(int argc, char* argv[])
{
    const int iterations = 16;

#pragma omp parallel
    {
        /* Loop variable declared inside the parallel region, so every
         * thread gets its own copy (equivalent to private(...)). */
        for (int i = 0; i < iterations; i++) {
            /* Wait until all threads have reached this iteration. */
#pragma omp barrier
            printf("Performing iteration %d\n", i);
            fflush(stdout);
            /* Hold everyone here so no thread races ahead to the next
             * iteration's print. */
#pragma omp barrier
        }
    }
    return 0;
}
decrypt.c
#include <stdio.h> #include "gcrypt.h" #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <strings.h> #include <omp.h> #include <time.h> #include <inttypes.h> #include "cryptwrapper.h" #include "rainbowvalue.h" #include "readBulk.h" int main(int argc, char const *argv[]) { if (argc < 3){ printf("please select rainbow table and encrypted file\n"); return 1; } FILE * fptr_rainbow = fopen(argv[1],"rb"); FILE * fptr_encry = fopen(argv[2],"rb"); if (fptr_encry==NULL){ printf("could not open encrypted file %s\n",argv[1]); return 1; } if (fptr_rainbow==NULL){ printf("could not open rainbow table file %s\n",argv[2]); return 1; } mycryptwrapper_init(); // to calculate hash without trailing 256 bits of decrypted file size_t mens = gcry_md_get_algo_dlen(algo); void * digest=malloc(mens); // initialization vector for decryption and encryption size_t len = 8; size_t file_len; if (fseek(fptr_encry,0,SEEK_END)){ perror("could not ssek end of file\n"); return 1; } file_len = ftell(fptr_encry); if (file_len==0){ perror("file length is 0\n"); return 1; } rewind(fptr_encry); // go to beginning of file uint8_t * buf = malloc(file_len); if (fread(buf, sizeof(uint8_t),file_len,fptr_encry)!= file_len){ perror("could not read complete file\n"); return 1; } //encrypted file is stored in buf and can now be decrypted struct s_rainbowvalue256 * rs = malloc(sizeof(struct s_rainbowvalue256)*BULKSIZE); size_t num_rainbow_values; clock_t old_clock; // read a block of rainbow values int success = 0; while ((num_rainbow_values=fread(rs, sizeof(struct s_rainbowvalue256),BULKSIZE,fptr_rainbow ))!=0){ if (success==1){break;} //printf("read %d rainbow values\n", (int) num_rainbow_values); // iterate through rainbow values and decrypt old_clock=clock(); #pragma omp parallel for for (size_t i = 0 ; i < num_rainbow_values ; i++){ if (success==1){ #pragma omp exitregion } uint8_t * decrypted_buf = malloc(file_len);//allocate mem for decrypted buffer gcry_cipher_hd_t dhd; if 
(gcry_cipher_open(&dhd,cipher,GCRY_CIPHER_MODE_CFB,0)){perror("could not open cypher\n");} if (gcry_cipher_setkey(dhd,rs[i].hash,8)){perror("could not set key\n");}; void * iv = malloc(len); memset(iv,0,len); if (gcry_cipher_setiv(dhd, iv , len)){perror("could not set init vector\n");} if (gcry_cipher_decrypt(dhd,decrypted_buf,file_len,buf,file_len)){perror("could not decrypt\n");} //mycryptwrapper_print(decrypted_buf,file_len); //printf("pw: %s\nfile:%s\n",rs[i].pw,decrypted_buf); if (check_sha256_tag(decrypted_buf,file_len)){ printf("pw: %s\n", rs[i].pw); char * enc_fname = malloc(strlen(argv[2])+5); strcpy(enc_fname, argv[2]); strcat(enc_fname,".decr"); FILE * encrypted_fptr = fopen(enc_fname,"wb"); if (fwrite(decrypted_buf,1,file_len-32,encrypted_fptr)!=file_len-32){ perror("couln not write all data to decrypted file"); //return 1; #pragma omp exitregion }; printf("successfully saved decrypted data in %s\n", enc_fname); //return 0; success=1; #pragma omp exitregion }// decryption succesfull end free(iv); free(decrypted_buf); gcry_cipher_close(dhd); }// end parallel float sec = (float)((clock()-old_clock))/(float)CLOCKS_PER_SEC; printf("\rcalc/sec: %4.0f", num_rainbow_values/sec); } if(success==0){ printf("\nnothing found\n"); } return 0; }
threading_utils.h
/*! * Copyright 2015-2019 by Contributors * \file common.h * \brief Threading utilities */ #ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <dmlc/omp.h> #include <algorithm> #include <limits> #include <type_traits> // std::is_signed #include <vector> #include "xgboost/logging.h" #if !defined(_OPENMP) extern "C" { inline int32_t omp_get_thread_limit() __GOMP_NOTHROW { return 1; } // NOLINT } #endif // !defined(_OPENMP) // MSVC doesn't implement the thread limit. #if defined(_OPENMP) && defined(_MSC_VER) extern "C" { inline int32_t omp_get_thread_limit() { return std::numeric_limits<int32_t>::max(); } // NOLINT } #endif // defined(_MSC_VER) namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. 
Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index // grain_size - max size of produced blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: 
void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template <typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); CHECK_GE(nthreads, 1); dmlc::OMPException exc; #pragma omp parallel num_threads(nthreads) { exc.Run([&]() { size_t tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads); size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); } exc.Rethrow(); } /** * OpenMP schedule */ struct Sched { enum { kAuto, kDynamic, kStatic, kGuided, } sched; size_t chunk{0}; Sched static Auto() { return Sched{kAuto}; } Sched static Dyn(size_t n = 0) { return Sched{kDynamic, n}; } Sched static Static(size_t n = 0) { return Sched{kStatic, n}; } Sched static Guided() { return Sched{kGuided}; } }; template <typename Index, typename Func> void ParallelFor(Index size, int32_t n_threads, Sched sched, Func fn) { #if defined(_MSC_VER) // msvc doesn't support unsigned integer as openmp index. 
using OmpInd = std::conditional_t<std::is_signed<Index>::value, Index, omp_ulong>; #else using OmpInd = Index; #endif OmpInd length = static_cast<OmpInd>(size); dmlc::OMPException exc; switch (sched.sched) { case Sched::kAuto: { #pragma omp parallel for num_threads(n_threads) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } break; } case Sched::kDynamic: { if (sched.chunk == 0) { #pragma omp parallel for num_threads(n_threads) schedule(dynamic) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } else { #pragma omp parallel for num_threads(n_threads) schedule(dynamic, sched.chunk) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } break; } case Sched::kStatic: { if (sched.chunk == 0) { #pragma omp parallel for num_threads(n_threads) schedule(static) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } else { #pragma omp parallel for num_threads(n_threads) schedule(static, sched.chunk) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } } break; } case Sched::kGuided: { #pragma omp parallel for num_threads(n_threads) schedule(guided) for (OmpInd i = 0; i < length; ++i) { exc.Run(fn, i); } break; } } exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, size_t n_threads, Func fn) { ParallelFor(size, n_threads, Sched::Static(), fn); } // FIXME(jiamingy): Remove this function to get rid of `omp_set_num_threads`, which sets a // global variable in runtime and affects other programs in the same process. template <typename Index, typename Func> void ParallelFor(Index size, Func fn) { ParallelFor(size, omp_get_max_threads(), Sched::Static(), fn); } // !defined(_OPENMP) inline int32_t OmpGetThreadLimit() { int32_t limit = omp_get_thread_limit(); CHECK_GE(limit, 1) << "Invalid thread limit for OpenMP."; return limit; } /* \brief Configure parallel threads. * * \param p_threads Number of threads, when it's less than or equal to 0, this function * will change it to number of process on system. 
* * \return Global openmp max threads before configuration. */ inline int32_t OmpSetNumThreads(int32_t* p_threads) { auto& threads = *p_threads; int32_t nthread_original = omp_get_max_threads(); if (threads <= 0) { threads = omp_get_num_procs(); } threads = std::min(threads, OmpGetThreadLimit()); omp_set_num_threads(threads); return nthread_original; } inline int32_t OmpGetNumThreads(int32_t n_threads) { if (n_threads <= 0) { n_threads = std::min(omp_get_num_procs(), omp_get_max_threads()); } n_threads = std::min(n_threads, OmpGetThreadLimit()); n_threads = std::max(n_threads, 1); return n_threads; } } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_THREADING_UTILS_H_
mclib.c
#include "mcrat.h" //define constants const double A_RAD=7.56e-15, C_LIGHT=2.99792458e10, PL_CONST=6.6260755e-27, FINE_STRUCT=7.29735308e-3, CHARGE_EL= 4.8032068e-10; const double K_B=1.380658e-16, M_P=1.6726231e-24, THOM_X_SECT=6.65246e-25, M_EL=9.1093879e-28 , R_EL=2.817941499892705e-13; void photonInjection(struct photon **ph, int *ph_num, double r_inj, double ph_weight, int min_photons, int max_photons, char spect, double theta_min, double theta_max, struct hydro_dataframe *hydro_data, gsl_rng * rand, FILE *fPtr) { int i=0, block_cnt=0, *ph_dens=NULL, ph_tot=0, j=0,k=0; double ph_dens_calc=0.0, fr_dum=0.0, y_dum=0.0, yfr_dum=0.0, fr_max=0, bb_norm=0, position_phi, ph_weight_adjusted, rmin, rmax; double com_v_phi, com_v_theta, *p_comv=NULL, *boost=NULL; //comoving phi, theta, comoving 4 momentum for a photon, and boost for photon(to go to lab frame) double *l_boost=NULL; //pointer to hold array of lorentz boost, to lab frame, values float num_dens_coeff; double r_grid_innercorner=0, r_grid_outercorner=0, theta_grid_innercorner=0, theta_grid_outercorner=0; double position_rand=0, position2_rand=0, position3_rand=0, cartesian_position_rand_array[3]; if (spect=='w') //from MCRAT paper, w for wien spectrum { num_dens_coeff=8.44; //printf("in wien spectrum\n"); } else { num_dens_coeff=20.29; //this is for black body spectrum //printf("in BB spectrum"); } //find how many blocks are near the injection radius within the angles defined in mc.par, get temperatures and calculate number of photons to allocate memory for //and then rcord which blocks have to have "x" amount of photons injected there rmin=r_inj - 0.5*C_LIGHT/hydro_data->fps; rmax=r_inj + 0.5*C_LIGHT/hydro_data->fps; for(i=0; i<hydro_data->num_elements; i++) { #if DIMENSIONS == THREE //want inner corner to be close to origin, therfore ned to have abs for 3D cartesian with negative coordinates, shouldnt affect the other geometry systems since theyre all defined from r=0, theta=0, phi=0 
//hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]-0.5*(hydro_data->r2_size)[i]); //hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]+0.5*(hydro_data->r2_size)[i]); //therefore do whats below hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, fabs((hydro_data->r0)[i])-0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])-0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])-0.5*(hydro_data->r2_size)[i]); hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, fabs((hydro_data->r0)[i])+0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])+0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])+0.5*(hydro_data->r2_size)[i]); #else hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], 0); hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], 0); #endif //look at all boxes in width delta r=c/fps and within angles we are interested in //if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max) && ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1_size)[i]<0.09)) if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max)) { //&& ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1)[i]<3.0*3.14/180) is just for testing sph_3d mcrat sim to see if block_cnt is the issue for the 200x normalization issue -> this fixed norm issue, not N_scatt 
issue when start at frame 0 // also try injecting photons in frame 1 without above conditions -> didnt fix normalization issue not N_scatt issue // also try inj at frame 1 with scale 1e11 -> didnt fixed normalization issue not N_scatt issue // also try inj at frame 0 (orig) to see what gets printed for diagnosing CHOMBO refinement levels being an issue // try inj at frame 0 with modified if statement and L scale 1e11 block_cnt++; //#if DIMENSIONS == THREE //fprintf(fPtr,"rmin %e rmax %e thetamin %e thetamax %e hydro: r0 %e r1 %e r2 %e r0_size %e r1_size %e r2_size %e r_inner %e theta_inner %e r_outer %e theta_outer %e\n", rmin, rmax, theta_min, theta_max, (hydro_data->r0)[i], (hydro_data->r1)[i], (hydro_data->r2)[i], (hydro_data->r0_size)[i], (hydro_data->r1_size)[i], (hydro_data->r2_size)[i], r_grid_innercorner, theta_grid_innercorner, r_grid_outercorner, theta_grid_outercorner); //#else //fprintf(fPtr,"rmin %e rmax %e thetamin %e thetamax %e hydro: r0 %e r1 %e r0_size %e r1_size %e r_inner %e theta_inner %e r_outer %e theta_outer %e dens %e\n", rmin, rmax, theta_min, theta_max, (hydro_data->r0)[i], (hydro_data->r1)[i], (hydro_data->r0_size)[i], (hydro_data->r1_size)[i], r_grid_innercorner, theta_grid_innercorner, r_grid_outercorner, theta_grid_outercorner, (hydro_data->dens)[i]); //#endif //fflush(fPtr); } } //printf("Blocks: %d\n", block_cnt); //allocate memory to record density of photons for each block ph_dens=malloc(block_cnt * sizeof(int)); //calculate the photon density for each block and save it to the array j=0; ph_tot=0; ph_weight_adjusted=ph_weight; //printf("%d %d\n", max_photons, min_photons); while ((ph_tot>max_photons) || (ph_tot<min_photons) ) { j=0; ph_tot=0; for (i=0;i<hydro_data->num_elements;i++) { //printf("%d\n",i); //printf("%e, %e, %e, %e, %e, %e\n", *(r+i),(r_inj - C_LIGHT/fps), (r_inj + C_LIGHT/fps), *(theta+i) , theta_max, theta_min); #if DIMENSIONS == THREE //want inner corner to be close to origin, therfore ned to have abs for 3D 
cartesian with negative coordinates, shouldnt affect the other geometry systems since theyre all defined from r=0, theta=0, phi=0 //hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]-0.5*(hydro_data->r2_size)[i]); //hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]+0.5*(hydro_data->r2_size)[i]); //therefore do whats below hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, fabs((hydro_data->r0)[i])-0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])-0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])-0.5*(hydro_data->r2_size)[i]); hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, fabs((hydro_data->r0)[i])+0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])+0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])+0.5*(hydro_data->r2_size)[i]); #else hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], 0); hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], 0); #endif //if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max) && ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1_size)[i]<0.09)) if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max)) { ph_dens_calc=(4.0/3.0)*hydroElementVolume(hydro_data, i) 
*(((hydro_data->gamma)[i]*num_dens_coeff*(hydro_data->temp)[i]*(hydro_data->temp)[i]*(hydro_data->temp)[i])/ph_weight_adjusted); //4 comes from L \propto 4p in the limit radiation pressure is greater than the matter energy density and 3 comes from p=u/3, where u is the energy density (*(ph_dens+j))=gsl_ran_poisson(rand,ph_dens_calc) ; //choose from poission distribution with mean of ph_dens_calc //printf("%d, %lf \n",*(ph_dens+j), ph_dens_calc); //sum up all the densities to get total number of photons ph_tot+=(*(ph_dens+j)); j++; } } if (ph_tot>max_photons) { //if the number of photons is too big make ph_weight larger ph_weight_adjusted*=10; } else if (ph_tot<min_photons) { ph_weight_adjusted*=0.5; } //printf("dens: %d, photons: %d\n", *(ph_dens+(j-1)), ph_tot); } //printf("%d\n", ph_tot); //allocate memory for that many photons and also allocate memory to hold comoving 4 momentum of each photon and the velocity of the fluid (*ph)=malloc (ph_tot * sizeof (struct photon )); p_comv=malloc(4*sizeof(double)); boost=malloc(3*sizeof(double)); l_boost=malloc(4*sizeof(double)); //go through blocks and assign random energies/locations to proper number of photons ph_tot=0; k=0; for (i=0;i<hydro_data->num_elements;i++) { #if DIMENSIONS == THREE //want inner corner to be close to origin, therfore ned to have abs for 3D cartesian with negative coordinates, shouldnt affect the other geometry systems since theyre all defined from r=0, theta=0, phi=0 //hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]-0.5*(hydro_data->r2_size)[i]); //hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], (hydro_data->r2)[i]+0.5*(hydro_data->r2_size)[i]); //therefore do whats below hydroCoordinateToSpherical(&r_grid_innercorner, 
&theta_grid_innercorner, fabs((hydro_data->r0)[i])-0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])-0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])-0.5*(hydro_data->r2_size)[i]); hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, fabs((hydro_data->r0)[i])+0.5*(hydro_data->r0_size)[i], fabs((hydro_data->r1)[i])+0.5*(hydro_data->r1_size)[i], fabs((hydro_data->r2)[i])+0.5*(hydro_data->r2_size)[i]); #else hydroCoordinateToSpherical(&r_grid_innercorner, &theta_grid_innercorner, (hydro_data->r0)[i]-0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]-0.5*(hydro_data->r1_size)[i], 0); hydroCoordinateToSpherical(&r_grid_outercorner, &theta_grid_outercorner, (hydro_data->r0)[i]+0.5*(hydro_data->r0_size)[i], (hydro_data->r1)[i]+0.5*(hydro_data->r1_size)[i], 0); #endif //if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max) && ((hydro_data->r0_size)[i]<1e11) && ((hydro_data->r1_size)[i]<0.09)) if ((rmin <= r_grid_outercorner) && (r_grid_innercorner <= rmax ) && (theta_grid_outercorner >= theta_min) && (theta_grid_innercorner <= theta_max)) { for(j=0;j<( *(ph_dens+k) ); j++ ) { //have to get random frequency for the photon comoving frequency y_dum=1; //initalize loop yfr_dum=0; while (y_dum>yfr_dum) { fr_dum=gsl_rng_uniform_pos(rand)*6.3e11*((hydro_data->temp)[i]); //in Hz //printf("%lf, %lf ",gsl_rng_uniform_pos(rand), (*(temps+i))); y_dum=gsl_rng_uniform_pos(rand); //printf("%lf ",fr_dum); if (spect=='w') { yfr_dum=(1.0/(1.29e31))*pow((fr_dum/((hydro_data->temp)[i])),3.0)/(exp((PL_CONST*fr_dum)/(K_B*((hydro_data->temp)[i]) ))-1); //curve is normalized to maximum } else { fr_max=(5.88e10)*((hydro_data->temp)[i]);//(C_LIGHT*(*(temps+i)))/(0.29); //max frequency of bb bb_norm=(PL_CONST*fr_max * pow((fr_max/C_LIGHT),2.0))/(exp(PL_CONST*fr_max/(K_B*((hydro_data->temp)[i])))-1); //find value of bb at fr_max yfr_dum=((1.0/bb_norm)*PL_CONST*fr_dum * 
pow((fr_dum/C_LIGHT),2.0))/(exp(PL_CONST*fr_dum/(K_B*((hydro_data->temp)[i])))-1); //curve is normalized to vaue of bb @ max frequency } //printf("%lf, %lf,%lf,%e \n",(*(temps+i)),fr_dum, y_dum, yfr_dum); } //printf("i: %d freq:%lf\n ",ph_tot, fr_dum); #if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE position_phi=gsl_rng_uniform(rand)*2*M_PI; #else position_phi=0;//dont need this in 3D #endif com_v_phi=gsl_rng_uniform(rand)*2*M_PI; com_v_theta=acos((gsl_rng_uniform(rand)*2)-1); //printf("%lf, %lf, %lf\n", position_phi, com_v_phi, com_v_theta); //populate 4 momentum comoving array *(p_comv+0)=PL_CONST*fr_dum/C_LIGHT; *(p_comv+1)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*cos(com_v_phi); *(p_comv+2)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*sin(com_v_phi); *(p_comv+3)=(PL_CONST*fr_dum/C_LIGHT)*cos(com_v_theta); //populate boost matrix, not sure why multiplying by -1, seems to give correct answer in old python code... #if DIMENSIONS == THREE hydroVectorToCartesian(boost, (hydro_data->v0)[i], (hydro_data->v1)[i], (hydro_data->v2)[i], (hydro_data->r0)[i], (hydro_data->r1)[i], (hydro_data->r2)[i]); #elif DIMENSIONS == TWO_POINT_FIVE hydroVectorToCartesian(boost, (hydro_data->v0)[i], (hydro_data->v1)[i], (hydro_data->v2)[i], (hydro_data->r0)[i], (hydro_data->r1)[i], position_phi); #else //this may have to change if PLUTO can save vectors in 3D when conidering 2D sim hydroVectorToCartesian(boost, (hydro_data->v0)[i], (hydro_data->v1)[i], 0, (hydro_data->r0)[i], (hydro_data->r1)[i], position_phi); #endif (*(boost+0))*=-1; (*(boost+1))*=-1; (*(boost+2))*=-1; //boost to lab frame lorentzBoost(boost, p_comv, l_boost, 'p', fPtr); //printf("Assignemnt: %e, %e, %e, %e\n", *(l_boost+0), *(l_boost+1), *(l_boost+2),*(l_boost+3)); (*ph)[ph_tot].p0=(*(l_boost+0)); (*ph)[ph_tot].p1=(*(l_boost+1)); (*ph)[ph_tot].p2=(*(l_boost+2)); (*ph)[ph_tot].p3=(*(l_boost+3)); (*ph)[ph_tot].comv_p0=(*(p_comv+0)); (*ph)[ph_tot].comv_p1=(*(p_comv+1)); (*ph)[ph_tot].comv_p2=(*(p_comv+2)); 
(*ph)[ph_tot].comv_p3=(*(p_comv+3));

//place photons in rand positions within fluid element
position_rand=gsl_rng_uniform_pos(rand)*((hydro_data->r0_size)[i])-0.5*((hydro_data->r0_size)[i]); //choose between -size/2 to size/2
position2_rand=gsl_rng_uniform_pos(rand)*((hydro_data->r1_size)[i])-0.5*((hydro_data->r1_size)[i]);

#if DIMENSIONS == THREE
position3_rand=gsl_rng_uniform_pos(rand)*((hydro_data->r2_size)[i])-0.5*((hydro_data->r2_size)[i]);
hydroCoordinateToMcratCoordinate(&cartesian_position_rand_array, (hydro_data->r0)[i]+position_rand, (hydro_data->r1)[i]+position2_rand, (hydro_data->r2)[i]+position3_rand);
#else
hydroCoordinateToMcratCoordinate(&cartesian_position_rand_array, (hydro_data->r0)[i]+position_rand, (hydro_data->r1)[i]+position2_rand, position_phi);
#endif

//assign random position
(*ph)[ph_tot].r0=cartesian_position_rand_array[0];
(*ph)[ph_tot].r1=cartesian_position_rand_array[1];
(*ph)[ph_tot].r2=cartesian_position_rand_array[2];
//fprintf(fPtr,"%d %e %e %e\n", ph_tot, (*ph)[ph_tot].r0, (*ph)[ph_tot].r1, (*ph)[ph_tot].r2);

(*ph)[ph_tot].s0=1; //initalize stokes parameters as non polarized photon, stokes parameterized are normalized such that I always =1
(*ph)[ph_tot].s1=0;
(*ph)[ph_tot].s2=0;
(*ph)[ph_tot].s3=0;
(*ph)[ph_tot].num_scatt=0;
(*ph)[ph_tot].weight=ph_weight_adjusted;
(*ph)[ph_tot].nearest_block_index=0; //will be refound on the first call to findNearestPropertiesAndMinMFP
(*ph)[ph_tot].type=INJECTED_PHOTON; //i for injected
//printf("%d\n",ph_tot);
ph_tot++;
}
k++;
}
}

*ph_num=ph_tot; //save number of photons
//printf(" %d: %d\n", *(ph_dens+(k-1)), *ph_num);
free(ph_dens); free(p_comv);free(boost); free(l_boost);
//exit(0);
}

//lorentzBoost: apply a Lorentz boost with velocity 'boost' (3-vector, in units of c) to the
//4-vector 'p_ph' and write the boosted 4-vector into 'result' (both length 4).
//object=='p' -> photon: the boosted 4-momentum is passed through zeroNorm() to re-impose the
//null-norm condition; any other value (e.g. 'e' for an electron) skips that check.
//Allocates and frees a 4x4 GSL matrix and a length-4 GSL vector on every call.
void lorentzBoost(double *boost, double *p_ph, double *result, char object, FILE *fPtr)
{
    //function to perform lorentz boost
    //if doing boost for an electron last argument is 'e' and there wont be a check for zero norm
    //if doing boost for a photon last argument is 'p' and there will be a check for zero norm

    double beta=0, gamma=0, *boosted_p=NULL;

    gsl_vector_view b=gsl_vector_view_array(boost, 3); //make boost pointer into vector
    gsl_vector_view p=gsl_vector_view_array(p_ph, 4); //make 4 momentum pointer into vector
    gsl_matrix *lambda1= gsl_matrix_calloc (4, 4); //create matrix thats 4x4 to do lorentz boost
    gsl_vector *p_ph_prime =gsl_vector_calloc(4); //create vector to hold lorentz boosted vector

    /*
    fprintf(fPtr,"Boost: %e, %e, %e, %e\n",gsl_blas_dnrm2(&b.vector), *(boost+0), *(boost+1), *(boost+2));
    fflush(fPtr);
    fprintf(fPtr,"4 Momentum to Boost: %e, %e, %e, %e\n",*(p_ph+0), *(p_ph+1), *(p_ph+2), *(p_ph+3));
    fflush(fPtr);
    */

    //if magnitude of fluid velocity is != 0 do lorentz boost otherwise dont need to do a boost
    if (gsl_blas_dnrm2(&b.vector) > 0)
    {
        //fprintf(fPtr,"in If\n");
        //fflush(fPtr);
        beta=gsl_blas_dnrm2(&b.vector);
        gamma=1.0/sqrt(1-beta*beta); //NOTE(review): assumes |beta|<1; no guard here against superluminal input

        //fprintf(fPtr,"Beta: %e\tGamma: %e\n",beta,gamma );
        //fflush(fPtr);

        //initalize matrix values: the symmetric boost matrix; off-diagonal spatial terms use
        //(gamma-1)*b_i*b_j/beta^2, the time row/column uses -gamma*b_i
        gsl_matrix_set(lambda1, 0,0, gamma);
        gsl_matrix_set(lambda1, 0,1, -1*gsl_vector_get(&b.vector,0)*gamma);
        gsl_matrix_set(lambda1, 0,2, -1*gsl_vector_get(&b.vector,1)*gamma);
        gsl_matrix_set(lambda1, 0,3, -1*gsl_vector_get(&b.vector,2)*gamma);
        gsl_matrix_set(lambda1, 1,1, 1+((gamma-1)*(gsl_vector_get(&b.vector,0)*gsl_vector_get(&b.vector,0))/(beta*beta) ) );
        gsl_matrix_set(lambda1, 1,2, ((gamma-1)*(gsl_vector_get(&b.vector,0)* gsl_vector_get(&b.vector,1)/(beta*beta) ) ));
        gsl_matrix_set(lambda1, 1,3, ((gamma-1)*(gsl_vector_get(&b.vector,0)* gsl_vector_get(&b.vector,2)/(beta*beta) ) ));
        gsl_matrix_set(lambda1, 2,2, 1+((gamma-1)*(gsl_vector_get(&b.vector,1)*gsl_vector_get(&b.vector,1))/(beta*beta) ) );
        gsl_matrix_set(lambda1, 2,3, ((gamma-1)*(gsl_vector_get(&b.vector,1)* gsl_vector_get(&b.vector,2))/(beta*beta) ) );
        gsl_matrix_set(lambda1, 3,3, 1+((gamma-1)*(gsl_vector_get(&b.vector,2)*gsl_vector_get(&b.vector,2))/(beta*beta) ) );
        //mirror the matrix since the boost is symmetric
        gsl_matrix_set(lambda1, 1,0, gsl_matrix_get(lambda1,0,1));
        gsl_matrix_set(lambda1, 2,0, gsl_matrix_get(lambda1,0,2));
        gsl_matrix_set(lambda1, 3,0, gsl_matrix_get(lambda1,0,3));
        gsl_matrix_set(lambda1, 2,1, gsl_matrix_get(lambda1,1,2));
        gsl_matrix_set(lambda1, 3,1, gsl_matrix_get(lambda1,1,3));
        gsl_matrix_set(lambda1, 3,2, gsl_matrix_get(lambda1,2,3));

        //p_ph_prime = lambda1 * p  (matrix-vector product)
        gsl_blas_dgemv(CblasNoTrans, 1, lambda1, &p.vector, 0, p_ph_prime );

        /*
        fprintf(fPtr,"Lorentz Boost Matrix 0: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 0,0), gsl_matrix_get(lambda1, 0,1), gsl_matrix_get(lambda1, 0,2), gsl_matrix_get(lambda1, 0,3));
        fflush(fPtr);
        fprintf(fPtr,"Lorentz Boost Matrix 1: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 1,0), gsl_matrix_get(lambda1, 1,1), gsl_matrix_get(lambda1, 1,2), gsl_matrix_get(lambda1, 1,3));
        fflush(fPtr);
        fprintf(fPtr,"Lorentz Boost Matrix 2: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 2,0), gsl_matrix_get(lambda1, 2,1), gsl_matrix_get(lambda1, 2,2), gsl_matrix_get(lambda1, 2,3));
        fflush(fPtr);
        fprintf(fPtr,"Lorentz Boost Matrix 3: %e,%e, %e, %e\n", gsl_matrix_get(lambda1, 3,0), gsl_matrix_get(lambda1, 3,1), gsl_matrix_get(lambda1, 3,2), gsl_matrix_get(lambda1, 3,3));
        fflush(fPtr);
        fprintf(fPtr,"Before Check: %e %e %e %e\n ",gsl_vector_get(p_ph_prime, 0), gsl_vector_get(p_ph_prime, 1), gsl_vector_get(p_ph_prime, 2), gsl_vector_get(p_ph_prime, 3));
        fflush(fPtr);
        */

        //double check vector for 0 norm condition if photon
        if (object == 'p')
        {
            //fprintf(fPtr,"In if\n");
            boosted_p=zeroNorm(gsl_vector_ptr(p_ph_prime, 0));
        }
        else
        {
            boosted_p=gsl_vector_ptr(p_ph_prime, 0);
        }
        /*
        fprintf(fPtr,"After Check: %e %e %e %e\n ", *(boosted_p+0),*(boosted_p+1),*(boosted_p+2),*(boosted_p+3) );
        fflush(fPtr);
        */
    }
    else
    {
        /*
        fprintf(fPtr,"in else");
        fflush(fPtr);
        */
        //double check vector for 0 norm condition
        if (object=='p')
        {
            boosted_p=zeroNorm(p_ph);
        }
        else
        {
            //if 4 momentum isnt for photon and there is no boost to be done, we dont care about normality and just want back what was passed to lorentz boost
            boosted_p=gsl_vector_ptr(&p.vector, 0);
        }
    }

    //assign values to result (copy out before freeing p_ph_prime, which boosted_p may point into)
    *(result+0)=*(boosted_p+0);
    *(result+1)=*(boosted_p+1);
    *(result+2)=*(boosted_p+2);
    *(result+3)=*(boosted_p+3);

    //free up memory
    //free(boosted_p);
    gsl_matrix_free (lambda1);
    gsl_vector_free(p_ph_prime);
}

//zeroNorm: rescale the spatial components of the photon 4-momentum p_ph (length 4, modified in
//place and also returned) so that |p_spatial| == p0, i.e. re-impose the photon null-norm
//condition while keeping the energy p0 fixed.
double *zeroNorm(double *p_ph)
{
    //ensures zero norm condition of photon 4 monetum is held
    int i=0;
    double normalizing_factor=0;
    gsl_vector_view p=gsl_vector_view_array((p_ph+1), 3); //make last 3 elements of p_ph pointer into vector

    //NOTE(review): exact floating-point != comparison -- this branch is taken essentially always;
    //the divisions below also assume the spatial norm is nonzero
    if (*(p_ph+0) != gsl_blas_dnrm2(&p.vector ) )
    {
        normalizing_factor=(gsl_blas_dnrm2(&p.vector ));
        //fprintf(fPtr,"in zero norm if\n");
        //fflush(fPtr);

        //go through and correct 4 momentum assuming the energy is correct
        *(p_ph+1)= ((*(p_ph+1))/(normalizing_factor))*(*(p_ph+0));
        *(p_ph+2)= ((*(p_ph+2))/(normalizing_factor))*(*(p_ph+0));
        *(p_ph+3)= ((*(p_ph+3))/(normalizing_factor))*(*(p_ph+0));
    }
    /*
    if (pow((*(p_ph+0)),2) != ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) )
    {
        printf("This isnt normalized in the function\nThe difference is: %e\n", pow((*(p_ph+0)),2) - ( pow((*(p_ph+1)),2)+pow((*(p_ph+2)),2)+pow((*(p_ph+3)),2) ) );
    }
    */
    //normalized within a factor of 10^-53
    return p_ph;
}

//findNearestPropertiesAndMinMFP: for every photon, locate the hydro fluid element that contains
//it (reusing the cached nearest_block_index when still valid), recompute its comoving 4-momentum
//if the element changed, draw a proposed mean-free-path time step into all_time_steps, and fill
//sorted_indexes with photon indexes ordered by increasing time step.
//Returns the number of photons whose containing fluid element had to be re-found (forced to 0
//when find_nearest_block_switch!=0, since then every photon is re-found by construction).
int findNearestPropertiesAndMinMFP( struct photon *ph, int num_ph, double *all_time_steps, int *sorted_indexes, struct hydro_dataframe *hydro_data, gsl_rng * rand, int find_nearest_block_switch, FILE *fPtr)
{
    int i=0, min_index=0, ph_block_index=0, num_thread=1, thread_id=0;
    double ph_x=0, ph_y=0, ph_phi=0, ph_z=0, ph_r=0, ph_theta=0;
    double fl_v_x=0, fl_v_y=0, fl_v_z=0; //to hold the fluid velocity in MCRaT coordinates
    double ph_v_norm=0, fl_v_norm=0, synch_x_sect=0;
    double n_cosangle=0, n_dens_lab_tmp=0,n_vx_tmp=0, n_vy_tmp=0, n_vz_tmp=0, n_temp_tmp=0 ;
    double rnd_tracker=0, n_dens_min=0, n_vx_min=0, n_vy_min=0, n_vz_min=0, n_temp_min=0;

    #if defined(_OPENMP)
    num_thread=omp_get_num_threads(); //default is one above if theres no openmp usage
    //NOTE(review): omp_get_num_threads() called OUTSIDE a parallel region returns 1, so the
    //parallel for below runs with 1 thread -- omp_get_max_threads() was presumably intended; confirm
    #endif

    bool is_in_block=0; //boolean to determine if the photon is outside of its previously noted block
    int index=0, num_photons_find_new_element=0;
    double
mfp=0,min_mfp=0, beta=0;
    double el_p[4];
    double ph_p_comv[4], ph_p[4], fluid_beta[3], photon_hydro_coord[3];

    //initialize gsl random number generator fo each thread
    const gsl_rng_type *rng_t;
    gsl_rng **rng;
    gsl_rng_env_setup();
    rng_t = gsl_rng_ranlxs0;

    rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *));
    rng[0]=rand; //thread 0 reuses the caller's generator; the others get fresh ones seeded from it
    //#pragma omp parallel for num_threads(nt)
    for(i=1;i<num_thread;i++)
    {
        rng[i] = gsl_rng_alloc (rng_t);
        gsl_rng_set(rng[i],gsl_rng_get(rand));
    }

    //go through each photon and find the blocks around it and then get the distances to all of those blocks and choose the one thats the shortest distance away
    //can optimize here, exchange the for loops and change condition to compare to each of the photons is the radius of the block is .95 (or 1.05) times the min (max) photon radius
    //or just parallelize this part here
    min_mfp=1e12; //sentinel "effectively never scatters" mean free path (cm)
    #pragma omp parallel for num_threads(num_thread) firstprivate( is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, ph_r, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker, ph_p_comv, el_p, ph_p, fluid_beta) private(i) shared(min_mfp ) reduction(+:num_photons_find_new_element)
    for (i=0;i<num_ph; i++)
    {
        //fprintf(fPtr, "%d, %d,%e\n", i, ((ph+i)->nearest_block_index), ((ph+i)->weight));
        //fflush(fPtr);
        if (find_nearest_block_switch==0)
        {
            ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault here
        }
        else
        {
            ph_block_index=0; // therefore if starting a new frame set index=0 to avoid this issue
        }

        //convert the photons coordinate to the hydro sim coordinate system
        //NOTE(review): &photon_hydro_coord has type double(*)[3] rather than double* -- same
        //address, but confirm against the prototype of mcratCoordinateToHydroCoordinate
        mcratCoordinateToHydroCoordinate(&photon_hydro_coord, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2);
        //printf("ph_x:%e, ph_y:%e\n", ph_x, ph_y);

        //if the location of the photon is inside the domain of the hydro simulation then do all of this, otherwise assign huge mfp value so no scattering occurs and the next frame is loaded
        // absorbed photons have ph_block_index=-1, therefore if this value is not less than 0, calulate the mfp properly but doesnt work when go to new frame and find new indexes (will change b/c will get rid of these photons when printing)
        //alternatively make decision based on 0 weight
        #if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
        if (((photon_hydro_coord[1]<(hydro_data->r1_domain)[1]) && (photon_hydro_coord[1]>(hydro_data->r1_domain)[0]) && (photon_hydro_coord[0]<(hydro_data->r0_domain)[1]) && (photon_hydro_coord[0]>(hydro_data->r0_domain)[0])) && ((ph+i)->nearest_block_index != -1) ) //can use sorted index to see which photons have been absorbed efficiently before printing and get the indexes
        #else
        if (((photon_hydro_coord[2]<(hydro_data->r2_domain)[1]) && (photon_hydro_coord[2]>(hydro_data->r2_domain)[0]) && (photon_hydro_coord[1]<(hydro_data->r1_domain)[1]) && (photon_hydro_coord[1]>(hydro_data->r1_domain)[0]) && (photon_hydro_coord[0]<(hydro_data->r0_domain)[1]) && (photon_hydro_coord[0]>(hydro_data->r0_domain)[0])) && ((ph+i)->nearest_block_index != -1) )
        #endif
        {
            is_in_block=checkInBlock(photon_hydro_coord[0], photon_hydro_coord[1], photon_hydro_coord[2], hydro_data, ph_block_index);

            //when rebinning photons can have comoving 4 momenta=0 and nearest_block_index=0 (and block 0 be the actual block the photon is in making it not refind the proper index and reclaulate the comoving 4 momenta) which can make counting synch scattered photons be thrown off, thus take care of this case by forcing the function to recalc things
            #if CYCLOSYNCHROTRON_SWITCH == ON
            if ((ph_block_index==0) && ( ((ph+i)->comv_p0)+((ph+i)->comv_p1)+((ph+i)->comv_p2)+((ph+i)->comv_p3) == 0 ) )
            {
                is_in_block=0; //say that photon is not in the block, force it to recompute things
            }
            #endif

            if (find_nearest_block_switch==0 && is_in_block)
            {
                //keep the saved grid index
                min_index=ph_block_index;
            }
            else
            {
                //find the new index of the block closest to the photon
                //min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh

                //find the new index of the block that the photon is actually in
                min_index=findContainingBlock(photon_hydro_coord[0], photon_hydro_coord[1], photon_hydro_coord[2], hydro_data, fPtr); //(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr);

                if (min_index != -1)
                {
                    (ph+i)->nearest_block_index=min_index; //save the index if min_index != -1

                    //also recalculate the photons' comoving frequency in this new fluid element
                    ph_p[0]=((ph+i)->p0);
                    ph_p[1]=((ph+i)->p1);
                    ph_p[2]=((ph+i)->p2);
                    ph_p[3]=((ph+i)->p3);

                    #if DIMENSIONS == THREE
                    hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], (hydro_data->r2)[min_index]);
                    #elif DIMENSIONS == TWO_POINT_FIVE
                    ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
                    hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
                    #else
                    ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
                    //this may have to change if PLUTO can save vectors in 3D when conidering 2D sim
                    hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], 0, (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
                    #endif

                    //NOTE(review): &fluid_beta/&ph_p/&ph_p_comv are double(*)[N], not the double*
                    //the callees declare -- same pointer value, but should be the bare array names;
                    //elsewhere in this file (e.g. photonEvent) the arrays are passed without &
                    lorentzBoost(&fluid_beta, &ph_p, &ph_p_comv, 'p', fPtr);
                    ((ph+i)->comv_p0)=ph_p_comv[0];
                    ((ph+i)->comv_p1)=ph_p_comv[1];
                    ((ph+i)->comv_p2)=ph_p_comv[2];
                    ((ph+i)->comv_p3)=ph_p_comv[3];

                    num_photons_find_new_element+=1;
                }
                else
                {
                    fprintf(fPtr, "Photon number %d FLASH index not found, making sure it doesnt scatter.\n", i);
                }
            }

            //if min_index!= -1 (know which fluid element photon is in) do all this stuff, otherwise make sure photon doesnt scatter
            if (min_index != -1)
            {
                //fprintf(fPtr,"Min Index: %d\n", min_index);

                //save values
                (n_dens_lab_tmp)= (hydro_data->dens_lab)[min_index];//(*(dens_lab+min_index));
                (n_temp_tmp)= (hydro_data->temp)[min_index];//(*(temp+min_index));

                #if DIMENSIONS == THREE
                hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], (hydro_data->r2)[min_index]);
                #elif DIMENSIONS == TWO_POINT_FIVE
                ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
                hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], (hydro_data->v2)[min_index], (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
                #else
                ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
                //this may have to change if PLUTO can save vectors in 3D when conidering 2D sim
                hydroVectorToCartesian(&fluid_beta, (hydro_data->v0)[min_index], (hydro_data->v1)[min_index], 0, (hydro_data->r0)[min_index], (hydro_data->r1)[min_index], ph_phi);
                #endif

                fl_v_x=fluid_beta[0];
                fl_v_y=fluid_beta[1];
                fl_v_z=fluid_beta[2];

                fl_v_norm=sqrt(fl_v_x*fl_v_x+fl_v_y*fl_v_y+fl_v_z*fl_v_z);
                ph_v_norm=sqrt(((ph+i)->p1)*((ph+i)->p1)+((ph+i)->p2)*((ph+i)->p2)+((ph+i)->p3)*((ph+i)->p3));

                //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product
                n_cosangle=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined

                beta=sqrt(1.0-1.0/((hydro_data->gamma)[min_index]*(hydro_data->gamma)[min_index])); //fluid speed from its Lorentz factor

                //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case
                rnd_tracker=0;

                #if defined(_OPENMP)
                thread_id=omp_get_thread_num();
                #endif

                rnd_tracker=gsl_rng_uniform_pos(rng[thread_id]);
                //printf("Rnd_tracker: %e Thread number %d \n",rnd_tracker, omp_get_thread_num() );

                //mfp=(-1)*log(rnd_tracker)*(M_P/((n_dens_tmp))/(THOM_X_SECT)); ///(1.0-beta*((n_cosangle)))) ;
                //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths IN COMOV FRAME for reference
                mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOM_X_SECT/(1.0-beta*n_cosangle))*log(rnd_tracker) ;
            }
            else
            {
                mfp=min_mfp; //unknown element: make sure this photon does not scatter
            }
        }
        else
        {
            mfp=min_mfp; //outside the hydro domain (or absorbed): huge mfp, no scattering
            //fprintf(fPtr,"Photon %d In ELSE\n", i);
            //exit(0);
        }

        *(all_time_steps+i)=mfp/C_LIGHT; //proposed time until this photon's next event, in seconds
        //fprintf(fPtr,"Photon %d has time %e\n", i, *(all_time_steps+i));
        //fflush(fPtr);
    }
    //exit(0);

    //free rand number generator (rng[0] belongs to the caller, so start at 1)
    for (i=1;i<num_thread;i++)
    {
        gsl_rng_free(rng[i]);
    }
    free(rng);

    //printf("HERE\n");
    for (i=0;i<num_ph;i++)
    {
        *(sorted_indexes+i)= i; //save indexes to array to use in qsort
    }
    //printf("before QSORT\n");

    //sort photon indexes by proposed time step; GNU and BSD qsort_r take the comparator and the
    //context argument in different orders, hence the two comparator functions below
    #if (defined _GNU_SOURCE || defined __GNU__ || defined __linux__)
    qsort_r(sorted_indexes, num_ph, sizeof (int), compare2, all_time_steps);
    #elif (defined __APPLE__ || defined __MACH__ || defined __DARWIN__ || defined __FREEBSD__ || defined __BSD__ || defined OpenBSD3_1 || defined OpenBSD3_9)
    qsort_r(sorted_indexes, num_ph, sizeof (int), all_time_steps, compare);
    #else
    #error Cannot detect operating system
    #endif

    //print number of times we had to refind the index of the elemtn photons were located in
    if (find_nearest_block_switch!=0)
    {
        num_photons_find_new_element=0; //force this to be 0 since we forced MCRaT to find the indexes for all the photons here
    }

    return num_photons_find_new_element;
}

//compare: qsort_r comparator with the BSD/macOS argument order (context pointer first). 'ar' is
//the double array of time steps; a and b point at int indexes into it. Orders ascending.
int compare (void *ar, const void *a, const void *b)
{
    //from https://phoxis.org/2012/07/12/get-sorted-index-orderting-of-an-array/
    int aa = *(int *) a;
    int bb = *(int *) b;
    double *arr=NULL;
    arr=ar;
    //printf("%d, %d\n", aa, bb);
    //printf("%e, %e\n", arr[aa] , arr[bb]);
    //return (aa - bb);
    /*
    if (arr[aa] < arr[bb])
        return -1;
    if (arr[aa] == arr[bb])
        return 0;
    if (arr[aa] > arr[bb])
        return 1;
    */
    //branchless three-way comparison: -1, 0, or 1
    return ((arr[aa] > arr[bb]) - (arr[aa] < arr[bb]));
}

//compare2: identical comparison with the GNU/glibc qsort_r argument order (context pointer last)
int compare2 ( const void *a, const void
*b, void *ar)
{
    //have 2 compare funcions b/c of changes in qsort_r between BSD and GNU
    //from https://phoxis.org/2012/07/12/get-sorted-index-orderting-of-an-array/
    int aa = *(int *) a;
    int bb = *(int *) b;
    double *arr=NULL;
    arr=ar;
    //branchless three-way comparison: -1, 0, or 1 (ascending order of time steps)
    return ((arr[aa] > arr[bb]) - (arr[aa] < arr[bb]));
}

//interpolatePropertiesAndMinMFP: disabled legacy routine (2D FLASH/RIKEN sims only). The whole
//body is commented out below and the function unconditionally returns 0; kept for reference.
int interpolatePropertiesAndMinMFP( struct photon *ph, int num_ph, int array_num, double *time_step, double *x, double *y, double *z, double *szx, double *szy, double *velx, double *vely, double *velz, double *dens_lab,\
double *temp, double *n_dens_lab, double *n_vx, double *n_vy, double *n_vz, double *n_temp, gsl_rng * rand, int find_nearest_block_switch, FILE *fPtr)
{
    /*
    * THIS FUNCTION IS WRITTEN JUST FOR 2D SIMS AS OF NOW, not used
    */
    /*
    int i=0, j=0, min_index=0, ph_block_index=0, thread_id=0;
    int left_block_index=0, right_block_index=0, bottom_block_index=0, top_block_index=0, all_adjacent_block_indexes[4];
    double ph_x=0, ph_y=0, ph_phi=0, ph_z=0, dist=0, left_dist_min=0, right_dist_min=0, top_dist_min=0, bottom_dist_min=0, dv=0, v=0;
    double fl_v_x=0, fl_v_y=0, fl_v_z=0; //to hold the fluid velocity in MCRaT coordinates
    double r=0, theta=0;
    double ph_v_norm=0, fl_v_norm=0;
    double n_cosangle=0, n_dens_lab_tmp=0,n_vx_tmp=0, n_vy_tmp=0, n_vz_tmp=0, n_temp_tmp=0;
    double rnd_tracker=0, n_dens_lab_min=0, n_vx_min=0, n_vy_min=0, n_vz_min=0, n_temp_min=0;
    int num_thread=2;//omp_get_max_threads();
    bool is_in_block=0; //boolean to determine if the photon is outside of its previously noted block
    int index=0;
    double mfp=0,min_mfp=0, beta=0;

    //initialize gsl random number generator fo each thread
    const gsl_rng_type *rng_t;
    gsl_rng **rng;
    gsl_rng_env_setup();
    rng_t = gsl_rng_ranlxs0;

    rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *));
    rng[0]=rand;
    //#pragma omp parallel for num_threads(nt)
    for(i=1;i<num_thread;i++)
    {
        rng[i] = gsl_rng_alloc (rng_t);
        gsl_rng_set(rng[i],gsl_rng_get(rand));
    }

    //go through each photon and find the blocks around it and then get the distances to all of those blocks and choose the one thats the shortest distance away
    //can optimize here, exchange the for loops and change condition to compare to each of the photons is the radius of the block is .95 (or 1.05) times the min (max) photon radius
    //or just parallelize this part here
    min_mfp=1e12;
    #pragma omp parallel for num_threads(num_thread) firstprivate( r, theta,dv, v, all_adjacent_block_indexes, j, left_block_index, right_block_index, top_block_index, bottom_block_index, is_in_block, ph_block_index, ph_x, ph_y, ph_z, ph_phi, min_index, n_dens_lab_tmp,n_vx_tmp, n_vy_tmp, n_vz_tmp, n_temp_tmp, fl_v_x, fl_v_y, fl_v_z, fl_v_norm, ph_v_norm, n_cosangle, mfp, beta, rnd_tracker) private(i) shared(min_mfp )
    for (i=0;i<num_ph; i++)
    {
        //printf("%d, %e,%e\n", i, ((ph+i)->r0), ((ph+i)->r1));
        if (find_nearest_block_switch==0)
        {
            ph_block_index=(ph+i)->nearest_block_index; //if starting a new frame the number of indexes can change and cause a seg fault
        }
        else
        {
            ph_block_index=0; //if starting a new frame set index=0 to avoid this issue
        }

        //if (strcmp(DIM_SWITCH, dim_2d_str)==0)
        #if DIMENSIONS == 2
        {
            ph_x=pow(pow(((ph+i)->r0),2.0)+pow(((ph+i)->r1),2.0), 0.5); //convert back to FLASH x coordinate
            ph_y=((ph+i)->r2);
            ph_phi=atan2(((ph+i)->r1), ((ph+i)->r0));
        }
        #else
        {
            ph_x=((ph+i)->r0);
            ph_y=((ph+i)->r1);
            ph_z=((ph+i)->r2);
        }
        #endif
        //printf("ph_x:%e, ph_y:%e\n", ph_x, ph_y);

        is_in_block=checkInBlock(ph_block_index, ph_x, ph_y, ph_z, x, y, z, szx, szy);
        if (find_nearest_block_switch==0 && is_in_block)
        {
            //keep the saved grid index
            min_index=ph_block_index;
        }
        else
        {
            //find the new index of the block closest to the photon
            //min_index=findNearestBlock(array_num, ph_x, ph_y, ph_z, x, y, z); //stop doing this one b/c nearest grid could be one that the photon isnt actually in due to adaptive mesh

            //find the new index of the block that the photon is actually in
            //min_index=findContainingBlock(array_num, ph_x, ph_y, ph_z, x, y, z, szx, szy, ph_block_index, find_nearest_block_switch, fPtr);
            (ph+i)->nearest_block_index=min_index; //save the index
        }

        //look for the blocks surounding the block of interest and order them by the
        left_dist_min=1e15;//set dist to impossible value to make sure at least first distance calulated is saved
        right_dist_min=1e15;
        top_dist_min=1e15;
        bottom_dist_min=1e15;
        for (j=0;j<array_num;j++)
        {
            //if (strcmp(DIM_SWITCH, dim_2d_str)==0)
            #if DIMENSIONS == 2
            {
                dist= pow(pow((*(x+min_index))- (*(x+j)), 2.0) + pow((*(y+min_index))- (*(y+j)) , 2.0),0.5);
            }
            #else
            {
                dist= pow(pow((*(x+min_index))- (*(x+j)), 2.0) + pow((*(y+min_index))- (*(y+j)),2.0 ) + pow((*(z+min_index))- (*(z+j)) , 2.0),0.5);
            }
            #endif

            if ((*(x+j))<(*(x+min_index)) && (dist < left_dist_min) )
            {
                left_block_index=j;
                left_dist_min=dist;
            }
            else if ((*(x+j))>(*(x+min_index)) && (dist < right_dist_min))
            {
                right_block_index=j;
                right_dist_min=dist;
            }

            if ((*(y+j))<(*(y+min_index)) && (dist < bottom_dist_min) )
            {
                bottom_block_index=j;
                bottom_dist_min=dist;
            }
            else if ((*(y+j))>(*(y+min_index)) && (dist < top_dist_min) )
            {
                top_block_index=j;
                top_dist_min=dist;
            }
        }
        all_adjacent_block_indexes[0]=left_block_index;
        all_adjacent_block_indexes[1]=right_block_index;
        all_adjacent_block_indexes[2]=bottom_block_index;
        all_adjacent_block_indexes[3]=top_block_index;

        //do a weighted average of the 4 nearest grids based on volume
        v=0;
        (n_dens_lab_tmp)=0;
        (n_vx_tmp)= 0;
        (n_vy_tmp)= 0;
        (n_temp_tmp)= 0;
        (n_vz_tmp)= 0;
        for (j=0;j<4;j++)
        {
            #if SIM_SWITCH == RIKEN
            {
                r=pow(pow((*(x+all_adjacent_block_indexes[j])),2.0)+pow((*(y+all_adjacent_block_indexes[j])),2.0), 0.5);
                theta=atan2((*(x+all_adjacent_block_indexes[j])), (*(y+all_adjacent_block_indexes[j])));
                dv=2.0*M_PI*pow(r,2)*sin(theta)*(*(szx+all_adjacent_block_indexes[j]))*(*(szy+all_adjacent_block_indexes[j])) ;
            }
            #else
            {
                //using FLASH
                dv=2.0*M_PI*(*(x+all_adjacent_block_indexes[j]))*pow(*(szx+all_adjacent_block_indexes[j]),2.0) ;
            }
            #endif
            v+=dv;

            //save values
            (n_dens_lab_tmp)+= (*(dens_lab+all_adjacent_block_indexes[j]))*dv;
            (n_vx_tmp)+= (*(velx+all_adjacent_block_indexes[j]))*dv;
            (n_vy_tmp)+= (*(vely+all_adjacent_block_indexes[j]))*dv;
            (n_temp_tmp)+= (*(temp+all_adjacent_block_indexes[j]))*dv;
            //if (strcmp(DIM_SWITCH, dim_3d_str)==0)
            #if DIMENSIONS == 3
            {
                (n_vz_tmp)+= (*(velz+all_adjacent_block_indexes[j]))*dv;
            }
            #endif
        }
        //fprintf(fPtr,"Outside\n");

        //save values
        (n_dens_lab_tmp)/= v;
        (n_vx_tmp)/= v;
        (n_vy_tmp)/= v;
        (n_temp_tmp)/= v;
        //if (strcmp(DIM_SWITCH, dim_3d_str)==0)
        #if DIMENSIONS == 3
        {
            (n_vz_tmp)/= v;
        }
        #endif

        //if (strcmp(DIM_SWITCH, dim_2d_str)==0)
        #if DIMENSIONS == 2
        {
            fl_v_x=n_vx_tmp*cos(ph_phi);
            fl_v_y=n_vx_tmp*sin(ph_phi);
            fl_v_z=n_vy_tmp;
        }
        #else
        {
            fl_v_x=n_vx_tmp;
            fl_v_y=n_vy_tmp;
            fl_v_z=n_vz_tmp;
        }
        #endif

        fl_v_norm=pow(pow(fl_v_x, 2.0)+pow(fl_v_y, 2.0)+pow(fl_v_z, 2.0), 0.5);
        ph_v_norm=pow(pow(((ph+i)->p1), 2.0)+pow(((ph+i)->p2), 2.0)+pow(((ph+i)->p3), 2.0), 0.5);

        //(*(n_cosangle+i))=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //find cosine of the angle between the photon and the fluid velocities via a dot product
        (n_cosangle)=((fl_v_x* ((ph+i)->p1))+(fl_v_y* ((ph+i)->p2))+(fl_v_z* ((ph+i)->p3)))/(fl_v_norm*ph_v_norm ); //make 1 for cylindrical otherwise its undefined

        //if (strcmp(DIM_SWITCH, dim_2d_str)==0)
        #if DIMENSIONS == 2
        {
            beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)),0.5);
        }
        #else
        {
            beta=pow((pow((n_vx_tmp),2)+pow((n_vy_tmp),2)+pow((n_vz_tmp),2)),0.5);
        }
        #endif

        //put this in to double check that random number is between 0 and 1 (exclusive) because there was a problem with this for parallel case
        rnd_tracker=0;
        #if defined(_OPENMP)
        thread_id=omp_get_thread_num();
        #endif
        rnd_tracker=gsl_rng_uniform_pos(rng[thread_id]);

        mfp=(-1)*(M_P/((n_dens_lab_tmp))/THOM_X_SECT/(1.0-beta*((n_cosangle))))*log(rnd_tracker) ; //calulate the mfp and then multiply it by the ln of a random number to simulate distribution of mean free paths

        #pragma omp critical
        if ( mfp<min_mfp)
        {
            min_mfp=mfp;
            n_dens_lab_min= n_dens_lab_tmp;
            n_vx_min=
n_vx_tmp;
            n_vy_min= n_vy_tmp;
            //if (strcmp(DIM_SWITCH, dim_3d_str)==0)
            #if DIMENSIONS == 3
            {
                n_vz_min= n_vz_tmp;
            }
            #endif
            n_temp_min= n_temp_tmp;
            index=i;
            //fprintf(fPtr, "Thread is %d. new min: %e for photon %d with block properties: %e, %e, %e Located at: %e, %e, Dist: %e\n", omp_get_thread_num(), mfp, index, n_vx_tmp, n_vy_tmp, n_temp_tmp, *(x+min_index), *(y+min_index), dist_min);
            //fflush(fPtr);
            #pragma omp flush(min_mfp)
        }
    }

    //free rand number generator
    for (i=1;i<num_thread;i++)
    {
        gsl_rng_free(rng[i]);
    }
    free(rng);

    *(n_dens_lab)= n_dens_lab_min;
    *(n_vx)= n_vx_min;
    *(n_vy)= n_vy_min;
    //if (strcmp(DIM_SWITCH, dim_3d_str)==0)
    #if DIMENSIONS == 3
    {
        *(n_vz)= n_vz_min;
    }
    #endif
    *(n_temp)= n_temp_min;
    (*time_step)=min_mfp/C_LIGHT;

    return index;
    */
    //function is disabled: the entire legacy body above is commented out
    return 0;
}

//updatePhotonPosition: advance every active photon (not in the cyclosynchrotron pool and with
//nonzero weight) along its straight-line direction p_i/p0 at the speed of light for a time t.
//Modifies ph[i].r0/r1/r2 in place; fPtr is only used by the commented-out diagnostics.
void updatePhotonPosition(struct photon *ph, int num_ph, double t, FILE *fPtr)
{
    //move photons by speed of light
    int i=0;
    #if defined(_OPENMP)
    int num_thread=omp_get_num_threads();
    //NOTE(review): omp_get_num_threads() outside a parallel region returns 1, so this loop runs
    //single-threaded -- omp_get_max_threads() was presumably intended; confirm. (Without OpenMP
    //num_thread is undeclared, but then the pragma below is ignored anyway.)
    #endif
    double old_position=0, new_position=0, divide_p0=0;

    #pragma omp parallel for num_threads(num_thread) firstprivate(old_position, new_position, divide_p0)
    for (i=0;i<num_ph;i++)
    {
        if (((ph+i)->type != CS_POOL_PHOTON) && ((ph+i)->weight != 0))
        {
            old_position= sqrt(((ph+i)->r0)*((ph+i)->r0)+((ph+i)->r1)*((ph+i)->r1)+((ph+i)->r2)*((ph+i)->r2));

            //uncommented checks since they were not necessary anymore
            divide_p0=1.0/((ph+i)->p0); //unit direction components are p1/p0 etc.; photon moves at c along that direction

            ((ph+i)->r0)+=((ph+i)->p1)*divide_p0*C_LIGHT*t; //update x position
            ((ph+i)->r1)+=((ph+i)->p2)*divide_p0*C_LIGHT*t;//update y
            ((ph+i)->r2)+=((ph+i)->p3)*divide_p0*C_LIGHT*t;//update z

            new_position= sqrt(((ph+i)->r0)*((ph+i)->r0)+((ph+i)->r1)*((ph+i)->r1)+((ph+i)->r2)*((ph+i)->r2));

            /*
            if ((new_position-old_position)/t > C_LIGHT)
            {
                fprintf(fPtr, "PHOTON NUMBER %d IS SUPERLUMINAL. ITS SPEED IS %e c.\n", i, ((new_position-old_position)/t)/C_LIGHT);
            }
            */
            //if ( (ph+i)->s0 != 1)
            //{ 	fprintf(fPtr, "PHOTON NUMBER %d DOES NOT HAVE I=1. Instead it is: %e\n", i, (ph+i)->s0); }
            //printf("In update function: %e, %e, %e, %e, %e, %e, %e\n",((ph+i)->r0), ((ph+i)->r1), ((ph+i)->r2), t, ((ph+i)->p1)/((ph+i)->p0), ((ph+i)->p2)/((ph+i)->p0), ((ph+i)->p3)/((ph+i)->p0) );
        }
    }
    //printf("In update function: %e, %e, %e, %e\n",t, ((ph)->p1)/((ph)->p0), ((ph)->p2)/((ph)->p0), ((ph)->p3)/((ph)->p0) );
}

//photonEvent: walk photons in order of increasing proposed event time and perform a single
//scattering/absorption event (head only here; the function body continues below).
double photonEvent(struct photon *ph, int num_ph, double dt_max, double *all_time_steps, int *sorted_indexes, struct hydro_dataframe *hydro_data, int *scattered_ph_index, int *frame_scatt_cnt, int *frame_abs_cnt, gsl_rng * rand, FILE *fPtr)//(struct photon *ph, int num_ph, double dt_max, double *all_time_steps, int *sorted_indexes, double *all_flash_vx, double *all_flash_vy, double *all_flash_vz, double *all_fluid_temp, int *scattered_ph_index, int *frame_scatt_cnt, int *frame_abs_cnt, gsl_rng * rand, FILE *fPtr)
{
    //function to perform single photon scattering
    int i=0, index=0, ph_index=0, event_did_occur=0; //variable event_did_occur is to keep track of wether a scattering or absorption actually occured or not,
    double scatt_time=0, old_scatt_time=0; //keep track of new time to scatter vs old time to scatter to know how much to incrementally propagate the photons if necessary
    double phi=0, theta=0; //phi and theta for the 4 momentum
    double ph_phi=0, flash_vx=0, flash_vy=0, flash_vz=0, fluid_temp=0;
    double *ph_p=malloc(4*sizeof(double)); //pointer to hold only photon 4 momentum @ start
    double *el_p_comov=malloc(4*sizeof(double));//pointer to hold the electron 4 momenta in comoving frame
    double *ph_p_comov=malloc(4*sizeof(double));//pointer to hold the comoving photon 4 momenta
    double *fluid_beta=malloc(3*sizeof(double));//pointer to hold fluid velocity vector
    double *negative_fluid_beta=malloc(3*sizeof(double));//pointer to hold negative fluid velocity vector
    double *s=malloc(4*sizeof(double)); //vector to hold the stokes parameters for a given photon
    i=0;
    old_scatt_time=0;
    event_did_occur=0;
    //fprintf(fPtr,"In this
function Num_ph %d\n", num_ph); //fflush(fPtr); while (i<num_ph && event_did_occur==0 ) { ph_index=(*(sorted_indexes+i)); scatt_time= *(all_time_steps+ph_index); //get the time until the photon scatters //IF THE TIME IS GREATER THAN dt_max dont let the photons positions be updated if (scatt_time<dt_max) { updatePhotonPosition(ph, num_ph, scatt_time-old_scatt_time, fPtr); //fprintf(fPtr,"i: %d, Photon: %d, Delta t=%e\n", i, ph_index, scatt_time-old_scatt_time); //fflush(fPtr); //WHAT IF THE PHOTON MOVES TO A NEW BLOCK BETWEEN WHEN WE CALC MFP AND MOVE IT TO DO THE SCATTERING???? //it mostly happens at low optical depth, near the photosphere so we would have a large mfp anyways so we probably wouldn't be in this function in that case index=(ph+ph_index)->nearest_block_index; //the sorted_indexes gives index of photon with smallest time to potentially scatter then extract the index of the block closest to that photon fluid_temp=(hydro_data->temp)[index]; //if (strcmp(DIM_SWITCH, dim_3d_str)==0) ph_phi=atan2(((ph+ph_index)->r1), (((ph+ph_index)->r0))); /* if (isnan((ph+ph_index)->r0) || isnan((ph+ph_index)->r1) || isnan((ph+ph_index)->r2)) { printf("Not a number\n"); } fprintf(fPtr,"ph_phi=%e\n", ph_phi); fflush(fPtr); */ //convert flash coordinated into MCRaT coordinates //printf("Getting fluid_beta\n"); #if DIMENSIONS == THREE hydroVectorToCartesian(fluid_beta, (hydro_data->v0)[index], (hydro_data->v1)[index], (hydro_data->v2)[index], (hydro_data->r0)[index], (hydro_data->r1)[index], (hydro_data->r2)[index]); #elif DIMENSIONS == TWO_POINT_FIVE hydroVectorToCartesian(fluid_beta, (hydro_data->v0)[index], (hydro_data->v1)[index], (hydro_data->v2)[index], (hydro_data->r0)[index], (hydro_data->r1)[index], ph_phi); #else //this may have to change if PLUTO can save vectors in 3D when conidering 2D sim hydroVectorToCartesian(fluid_beta, (hydro_data->v0)[index], (hydro_data->v1)[index], 0, (hydro_data->r0)[index], (hydro_data->r1)[index], ph_phi); #endif /* 
fprintf(fPtr,"FLASH v: %e, %e\n", flash_vx,flash_vy); fflush(fPtr); */ //fill in photon 4 momentum *(ph_p+0)=((ph+ph_index)->p0); *(ph_p+1)=((ph+ph_index)->p1); *(ph_p+2)=((ph+ph_index)->p2); *(ph_p+3)=((ph+ph_index)->p3); //first we bring the photon to the fluid's comoving frame //already have comoving 4 momentum *(ph_p_comov+0)=((ph+ph_index)->comv_p0); *(ph_p_comov+1)=((ph+ph_index)->comv_p1); *(ph_p_comov+2)=((ph+ph_index)->comv_p2); *(ph_p_comov+3)=((ph+ph_index)->comv_p3); //fill in stokes parameters *(s+0)=((ph+ph_index)->s0); //I ==1 *(s+1)=((ph+ph_index)->s1); //Q/I *(s+2)=((ph+ph_index)->s2); //U/I *(s+3)=((ph+ph_index)->s3); //V/I /* if (((ph+ph_index)->type) == COMPTONIZED_PHOTON) { fprintf(fPtr,"Unscattered Photon in Lab frame: %e, %e, %e,%e\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3), (ph->r0), (ph->r1), (ph->r2), *(s+0), *(s+1), *(s+2), *(s+3)); fflush(fPtr); fprintf(fPtr,"Fluid Beta: %e, %e, %e\n", *(fluid_beta+0),*(fluid_beta+1), *(fluid_beta+2)); fflush(fPtr); } fprintf(fPtr,"Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3); fflush(fPtr); if (((ph+ph_index)->type) == COMPTONIZED_PHOTON) { fprintf(fPtr, "Before Scattering, In Comov_frame:\n"); fflush(fPtr); fprintf(fPtr, "ph_comov: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3)); fflush(fPtr); } */ //then rotate the stokes plane by some angle such that we are in the stokes coordinat eystsem after the lorentz boost #if STOKES_SWITCH == ON { stokesRotation(fluid_beta, (ph_p+1), (ph_p_comov+1), s, fPtr); } #endif //exit(0); //second we generate a thermal electron at the correct temperature singleElectron(el_p_comov, fluid_temp, ph_p_comov, rand, fPtr); /* if (((ph+ph_index)->type) == COMPTONIZED_PHOTON) { fprintf(fPtr,"el_comov: %e, %e, %e,%e\n", *(el_p_comov+0), *(el_p_comov+1), *(el_p_comov+2), *(el_p_comov+3)); fflush(fPtr); } */ //third we perform the scattering and save scattered photon 4 monetum in ph_p_comov @ end of function 
event_did_occur=singleScatter(el_p_comov, ph_p_comov, s, rand, fPtr); /* if (((ph+ph_index)->type) == COMPTONIZED_PHOTON) { fprintf(fPtr,"After Scattering, After Lorentz Boost to Comov frame: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3)); fflush(fPtr); } */ if (event_did_occur==1) { //fprintf(fPtr,"Within the if!\n"); //fflush(fPtr); //if the scattering occured have to uodate the phtoon 4 momentum. if photon didnt scatter nothing changes //fourth we bring the photon back to the lab frame *(negative_fluid_beta+0)=-1*( *(fluid_beta+0)); *(negative_fluid_beta+1)=-1*( *(fluid_beta+1)); *(negative_fluid_beta+2)=-1*( *(fluid_beta+2)); lorentzBoost(negative_fluid_beta, ph_p_comov, ph_p, 'p', fPtr); /* if (((ph+ph_index)->type) == COMPTONIZED_PHOTON) { fprintf(fPtr,"Scattered Photon in Lab frame: %e, %e, %e,%e\n\n", *(ph_p+0), *(ph_p+1), *(ph_p+2), *(ph_p+3)); fflush(fPtr); } */ #if STOKES_SWITCH == ON { stokesRotation(negative_fluid_beta, (ph_p_comov+1), (ph_p+1), s, fPtr); //rotate to boost back to lab frame //save stokes parameters ((ph+ph_index)->s0)= *(s+0); //I ==1 ((ph+ph_index)->s1)= *(s+1); ((ph+ph_index)->s2)= *(s+2); ((ph+ph_index)->s3)= *(s+3); } #endif if (((*(ph_p+0))*C_LIGHT/1.6e-9) > 1e4) { //energy greater than 1e4 keV fprintf(fPtr,"Extremely High Photon Energy!!!!!!!!\n"); fflush(fPtr); } //fprintf(fPtr,"Old: %e, %e, %e,%e\n", ph->p0, ph->p1, ph->p2, ph->p3); //fprintf(fPtr, "Old: %e, %e, %e,%e\n", *(ph_p_comov+0), *(ph_p_comov+1), *(ph_p_comov+2), *(ph_p_comov+3)); //assign the photon its new lab 4 momentum ((ph+ph_index)->p0)=(*(ph_p+0)); ((ph+ph_index)->p1)=(*(ph_p+1)); ((ph+ph_index)->p2)=(*(ph_p+2)); ((ph+ph_index)->p3)=(*(ph_p+3)); //assign it the comoving frame 4 momentum ((ph+ph_index)->comv_p0)=(*(ph_p_comov+0)); ((ph+ph_index)->comv_p1)=(*(ph_p_comov+1)); ((ph+ph_index)->comv_p2)=(*(ph_p_comov+2)); ((ph+ph_index)->comv_p3)=(*(ph_p_comov+3)); //printf("Done assigning values to original struct\n"); 
                //incremement that photons number of scatterings
                ((ph+ph_index)->num_scatt)+=1;
                *frame_scatt_cnt+=1; //incrememnt total number of scatterings
            }
        }
        else
        {
            // if the photon scatt_time > dt_max
            //have to adjust the time properly so that the time si now appropriate for the next frame
            scatt_time=dt_max;
            updatePhotonPosition(ph, num_ph, scatt_time-old_scatt_time, fPtr);
            event_did_occur=1; //set equal to 1 to get out of the loop b/c other subsequent photons will have scatt_time > dt_max
        }
        old_scatt_time=scatt_time;
        i++;
    }
    //exit(0);
    *scattered_ph_index=ph_index; //save the index of the photon that was scattered
    //fprintf(fPtr,"scattered_ph_index: %d %d\n", *scattered_ph_index, (*(sorted_indexes+i-1)));
    //fflush(fPtr);
    free(el_p_comov);
    free(ph_p_comov);
    free(fluid_beta);
    free(negative_fluid_beta);
    free(ph_p);
    free(s);
    ph_p=NULL;negative_fluid_beta=NULL;ph_p_comov=NULL; el_p_comov=NULL;
    //retrun total time elapsed to scatter a photon
    return scatt_time;
}

/*
 * Draw a single thermal electron 4-momentum (el_p, comoving frame) for a
 * fluid of temperature temp (Kelvin), biased for scattering against the
 * photon whose comoving 4-momentum is ph_p.
 *
 * For temp >= 1e7 K the Lorentz factor is rejection-sampled from a
 * relativistic Maxwell-Juettner distribution (via gsl_sf_bessel_Kn); below
 * that, gamma is built from three Gaussian velocity components (Maxwellian).
 * The polar angle is rejection-sampled with the (1 - beta*cos(theta)) flux
 * weighting, then the electron vector is rotated into the frame aligned with
 * the photon's direction.
 */
void singleElectron(double *el_p, double temp, double *ph_p, gsl_rng * rand, FILE *fPtr)
{
    //generates an electron with random energy
    double factor=0, gamma=0;
    double y_dum=0, f_x_dum=0, x_dum=0, beta_x_dum=0, beta=0, phi=0, theta=0, ph_theta=0, ph_phi=0;
    gsl_matrix *rot= gsl_matrix_calloc (3, 3); //create matrix thats 3x3 to do rotation
    gsl_vector_view el_p_prime ; //create vector to hold rotated electron 4 momentum
    gsl_vector *result=gsl_vector_alloc (3);

    //fprintf(fPtr, "Temp in singleElectron: %e\n", temp);
    if (temp>= 1e7)
    {
        //printf("In if\n");
        factor=K_B*temp/(M_EL*C_LIGHT*C_LIGHT); // dimensionless theta_e = kT/(m_e c^2)
        y_dum=1; //initalize loop to get a random gamma from the distribution of electron velocities
        f_x_dum=0;
        // rejection sampling; the isnan guard re-draws when x_dum < 1 makes beta imaginary
        while ((isnan(f_x_dum) !=0) || (y_dum>f_x_dum) )
        {
            x_dum=gsl_rng_uniform_pos(rand)*(1+100*factor);
            beta_x_dum=sqrt(1-(1/(x_dum*x_dum)));
            y_dum=gsl_rng_uniform(rand)/2.0;
            f_x_dum=x_dum*x_dum*(beta_x_dum/gsl_sf_bessel_Kn (2, 1.0/factor))*exp(-1*x_dum/factor);
            //
            //fprintf(fPtr,"Choosing a Gamma: xdum: %e, f_x_dum: %e, y_dum: %e\n", x_dum, f_x_dum, y_dum);
        }
        gamma=x_dum;
    }
    else
    {
        //printf("In else\n");
        factor=sqrt(K_B*temp/M_EL);
        //calculate a random gamma from 3 random velocities drawn from a gaussian distribution with std deviation of "factor"
        gamma=1.0/sqrt( 1- (pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+ pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2)+pow(gsl_ran_gaussian(rand, factor)/C_LIGHT, 2) )); //each vel direction is normal distribution -> maxwellian when multiplied
    }
    //fprintf(fPtr,"Chosen Gamma: %e\n",gamma);

    beta=sqrt( 1- (1/(gamma*gamma)) );
    //printf("Beta is: %e in singleElectron\n", beta);
    phi=gsl_rng_uniform(rand)*2*M_PI;

    y_dum=1; //initalize loop to get a random theta
    f_x_dum=0;
    // rejection-sample theta with pdf proportional to sin(theta)*(1 - beta*cos(theta));
    // 1.3 is an envelope constant bounding the pdf's maximum
    while (y_dum>f_x_dum)
    {
        y_dum=gsl_rng_uniform(rand)*1.3;
        x_dum=gsl_rng_uniform(rand)*M_PI;
        f_x_dum=sin(x_dum)*(1-(beta*cos(x_dum)));
    }
    theta=x_dum;
    //fprintf(fPtr,"Beta: %e\tPhi: %e\tTheta: %e\n",beta,phi, theta);

    //fill in electron 4 momentum; ordering appears to be E/c, pz, py, px -- TODO confirm
    *(el_p+0)=gamma*(M_EL)*(C_LIGHT);
    *(el_p+1)=gamma*(M_EL)*(C_LIGHT)*beta*cos(theta);
    *(el_p+2)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*sin(phi);
    *(el_p+3)=gamma*(M_EL)*(C_LIGHT)*beta*sin(theta)*cos(phi);
    //printf("Old: %e, %e, %e,%e\n", *(el_p+0), *(el_p+1), *(el_p+2), *(el_p+3));

    el_p_prime=gsl_vector_view_array((el_p+1), 3);

    //find angles of photon; NOTE(review): original author was unsure why the
    //reference frame changes here -- verify against the scattering routine
    // (continuation of singleElectron) angles of the photon in the comoving frame
    ph_phi=atan2(*(ph_p+2), *(ph_p+3)); //Double Check
    ph_theta=atan2(sqrt( pow(*(ph_p+2),2)+ pow(*(ph_p+3),2)) , (*(ph_p+1)) );
    //printf("Calculated Photon phi and theta in singleElectron:%e, %e\n", ph_phi, ph_theta);

    //fill in rotation matrix to rotate around x axis to get rid of phi angle
    gsl_matrix_set(rot, 1,1,1);
    gsl_matrix_set(rot, 2,2,cos(ph_theta));
    gsl_matrix_set(rot, 0,0,cos(ph_theta));
    gsl_matrix_set(rot, 0,2,-sin(ph_theta));
    gsl_matrix_set(rot, 2,0,sin(ph_theta));
    gsl_blas_dgemv(CblasNoTrans, 1, rot, &el_p_prime.vector, 0, result); // result = rot * el_p[1..3]
    /*
    printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2));
    printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2));
    printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2));
    printf("Middle: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(result,0), gsl_vector_get(result,1), gsl_vector_get(result,2));
    */

    // second rotation (by -ph_phi); writes the final spatial components back into el_p
    gsl_matrix_set_all(rot,0);
    gsl_matrix_set(rot, 0,0,1);
    gsl_matrix_set(rot, 1,1,cos(-ph_phi));
    gsl_matrix_set(rot, 2,2,cos(-ph_phi));
    gsl_matrix_set(rot, 1,2,-sin(-ph_phi));
    gsl_matrix_set(rot, 2,1,sin(-ph_phi));
    gsl_blas_dgemv(CblasNoTrans, 1, rot, result, 0, &el_p_prime.vector);
    /*
    printf("Rotation Matrix 0: %e,%e, %e\n", gsl_matrix_get(rot, 0,0), gsl_matrix_get(rot, 0,1), gsl_matrix_get(rot, 0,2));
    printf("Rotation Matrix 1: %e,%e, %e\n", gsl_matrix_get(rot, 1,0), gsl_matrix_get(rot, 1,1), gsl_matrix_get(rot, 1,2));
    printf("Rotation Matrix 2: %e,%e, %e\n", gsl_matrix_get(rot, 2,0), gsl_matrix_get(rot, 2,1), gsl_matrix_get(rot, 2,2));
    printf("Final EL_P_vec: %e, %e, %e,%e\n", *(el_p+0), gsl_vector_get(&el_p_prime.vector,0), gsl_vector_get(&el_p_prime.vector,1), gsl_vector_get(&el_p_prime.vector,2));
    */
    gsl_matrix_free (rot);gsl_vector_free(result);
}

/*
 * Weight-averaged photon energy of the whole photon population, in ergs
 * (p0 carries momentum/energy divided by c, hence the final *C_LIGHT).
 * With CYCLOSYNCHROTRON_SWITCH == ON, zero-weight (null/absorbed) photons
 * are excluded from both sums.
 */
double averagePhotonEnergy(struct photon *ph, int num_ph)
{
    //to calculate weighted photon energy in ergs
    int i=0;
#if defined(_OPENMP)
    int num_thread=omp_get_num_threads();
#endif
    double e_sum=0, w_sum=0;

#pragma omp parallel for reduction(+:e_sum) reduction(+:w_sum)
    for (i=0;i<num_ph;i++)
    {
#if CYCLOSYNCHROTRON_SWITCH == ON
        if (((ph+i)->weight != 0)) //dont want account for null or absorbed UNABSORBED_CS_PHOTON photons
#endif
        {
            e_sum+=(((ph+i)->p0)*((ph+i)->weight));
            w_sum+=((ph+i)->weight);
        }
    }
    return (e_sum*C_LIGHT)/w_sum;
}

/*
 * Gather per-frame scattering statistics over all photons: min/max number of
 * scatterings, average number of scatterings (*avg) and average radius
 * (*r_avg), plus per-type average radii logged to fPtr.
 *
 * NOTE(review): only temp_min/temp_max/sum/avg_r_sum/count appear in the
 * OpenMP reduction clauses; avg_r_sum_inject, avg_r_sum_comp, avg_r_sum_synch
 * and their counters are updated unsynchronized inside the parallel loop --
 * possible data race when OpenMP is enabled. Confirm and extend the
 * reductions if the per-type averages must be exact.
 */
void phScattStats(struct photon *ph, int ph_num, int *max, int *min, double *avg, double *r_avg, FILE *fPtr )
{
    int temp_max=0, temp_min=INT_MAX, i=0, count=0, count_synch=0, count_comp=0, count_i=0;
#if defined(_OPENMP)
    int num_thread=omp_get_num_threads();
#endif
    double sum=0, avg_r_sum=0, avg_r_sum_synch=0, avg_r_sum_comp=0, avg_r_sum_inject=0;
    //printf("Num threads: %d", num_thread);

#pragma omp parallel for num_threads(num_thread) reduction(min:temp_min) reduction(max:temp_max) reduction(+:sum) reduction(+:avg_r_sum) reduction(+:count)
    for (i=0;i<ph_num;i++)
    {
#if CYCLOSYNCHROTRON_SWITCH == ON
        if (((ph+i)->weight != 0)) //dont want account for null or absorbed UNABSORBED_CS_PHOTON photons
#endif
        {
            sum+=((ph+i)->num_scatt);
            avg_r_sum+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
            //printf("%d %c %e %e %e %e %e %e\n", i, (ph+i)->type, (ph+i)->p0, (ph+i)->comv_p0, (ph+i)->r0, (ph+i)->r1, (ph+i)->r2, (ph+i)->num_scatt);
            if (((ph+i)->num_scatt) > temp_max )
            {
                temp_max=((ph+i)->num_scatt); // NOTE(review): int assignment truncates if num_scatt is floating point
                //printf("The new max is: %d\n", temp_max);
            }
            //if ((i==0) || (((ph+i)->num_scatt)<temp_min))
            if (((ph+i)->num_scatt)<temp_min)
            {
                temp_min=((ph+i)->num_scatt);
                //printf("The new min is: %d\n", temp_min);
            }
            if (((ph+i)->type) == INJECTED_PHOTON )
            {
                avg_r_sum_inject+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
                count_i++;
            }
            if ((((ph+i)->type) == COMPTONIZED_PHOTON) || (((ph+i)->type) == UNABSORBED_CS_PHOTON))
            {
                avg_r_sum_comp+=sqrt(((ph+i)->r0)*((ph+i)->r0) +
                ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
                count_comp++;
            }
            count++;
        }
        // note: this branch runs even for zero-weight photons (it is outside
        // the weight-filtered block above)
        if (((ph+i)->type) == CS_POOL_PHOTON )
        {
            avg_r_sum_synch+=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
            count_synch++;
        }
    }

    // NOTE(review): any of count_i/count_comp/count_synch may be zero here,
    // producing inf/nan in the log line below -- confirm acceptable.
    fprintf(fPtr, "In this frame Avg r for i type: %e c and o type: %e and s type: %e\n", avg_r_sum_inject/count_i, avg_r_sum_comp/count_comp, avg_r_sum_synch/count_synch);
    fflush(fPtr);
    //exit(0);
    *avg=sum/count;
    *r_avg=avg_r_sum/count;
    *max=temp_max;
    *min=temp_min;
}

/*
 * Fill a hydro_dataframe with an analytic cylindrical-outflow test model:
 * constant Lorentz factor, comoving temperature and density everywhere, with
 * the velocity direction chosen per compile-time DIMENSIONS/GEOMETRY so the
 * flow is parallel to the jet axis.
 */
void cylindricalPrep(struct hydro_dataframe *hydro_data)
{
    double gamma_infinity=100, t_comov=1e5, ddensity=3e-7;// the comoving temperature in Kelvin, and the comoving density in g/cm^2
    int i=0;
    double vel=sqrt(1-pow(gamma_infinity, -2.0)), lab_dens=gamma_infinity*ddensity;

    for (i=0; i<hydro_data->num_elements; i++)
    {
        ((hydro_data->gamma))[i]=gamma_infinity;
        ((hydro_data->dens))[i]=ddensity;
        ((hydro_data->dens_lab))[i]=lab_dens;
        ((hydro_data->pres))[i]=(A_RAD*pow(t_comov, 4.0))/(3); // radiation-dominated: P = aT^4/3
        ((hydro_data->temp))[i]=t_comov; //just assign t_comov
#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
#if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
        ((hydro_data->v0))[i]=0;
        ((hydro_data->v1))[i]=vel; //geometry dependent want this to be parallel to jet axis
#endif
#if GEOMETRY == SPHERICAL
        ((hydro_data->v0))[i]=vel*cos(((hydro_data->r1))[i]);//rhat
        ((hydro_data->v1))[i]=-vel*sin(((hydro_data->r1))[i]);//theta hat direction
#endif
#if DIMENSIONS == TWO_POINT_FIVE
        //have to make sure that the 3rd vctro direction is set to 0 in 2.5D case
        ((hydro_data->v2))[i]=0;
#endif
#else
#if GEOMETRY == CARTESIAN
        ((hydro_data->v0))[i]=0;
        ((hydro_data->v1))[i]=0;
        ((hydro_data->v2))[i]=vel;
#endif
#if GEOMETRY == SPHERICAL
        ((hydro_data->v0))[i]=vel*cos(((hydro_data->r1))[i]);//rhat
        ((hydro_data->v1))[i]=-vel*sin(((hydro_data->r1))[i]);//theta hat direction
        ((hydro_data->v2))[i]=0;
#endif
#if GEOMETRY == POLAR
        ((hydro_data->v0))[i]=0;
        ((hydro_data->v1))[i]=0;
        ((hydro_data->v2))[i]=vel;
#endif
#endif
    }
}

/*
 * Fill a hydro_dataframe with the analytic spherical-fireball test model:
 * coasting (gamma = gamma_infinity) beyond the saturation radius
 * r00*gamma_infinity, linearly accelerating (gamma = r/r00) inside it, with
 * the matching pressure/density/temperature profiles and a purely radial
 * velocity field per compile-time DIMENSIONS/GEOMETRY.
 */
void sphericalPrep(struct hydro_dataframe *hydro_data, FILE *fPtr)
{
    double gamma_infinity=100, lumi=1e52, r00=1e8; //shopuld be 10^57
    //double gamma_infinity=5, lumi=1e52, r00=1e8; //shopuld be 10^57
    double vel=0, r=0;
    int i=0;

    for (i=0; i<hydro_data->num_elements; i++)
    {
        if (((hydro_data->r))[i] >= (r00*gamma_infinity))
        {
            ((hydro_data->gamma))[i]=gamma_infinity;
            ((hydro_data->pres))[i]=(lumi*pow(r00, 2.0/3.0)*pow(((hydro_data->r))[i], -8.0/3.0) )/(12.0*M_PI*C_LIGHT*pow(gamma_infinity, 4.0/3.0));
        }
        else
        {
            ((hydro_data->gamma))[i]=((hydro_data->r))[i]/r00;
            ((hydro_data->pres))[i]=(lumi*pow(r00, 2.0))/(12.0*M_PI*C_LIGHT*pow(((hydro_data->r))[i], 4.0) );
        }
        ((hydro_data->dens))[i]=lumi/(4*M_PI*pow(((hydro_data->r))[i], 2.0)*pow(C_LIGHT, 3.0)*gamma_infinity*(((hydro_data->gamma))[i]));
        ((hydro_data->dens_lab))[i]=(((hydro_data->dens))[i])*(((hydro_data->gamma))[i]);
        ((hydro_data->temp))[i]=pow(3*(((hydro_data->pres))[i])/(A_RAD) ,1.0/4.0); // invert P = aT^4/3
        vel=sqrt(1-(pow(((hydro_data->gamma))[i], -2.0)));
#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
#if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
        r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2));
        ((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
        ((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
#endif
#if GEOMETRY == SPHERICAL
        ((hydro_data->v0))[i]=vel;//rhat
        ((hydro_data->v1))[i]=0;//theta hat direction
#endif
#if DIMENSIONS == TWO_POINT_FIVE
        //have to make sure that the 3rd vctro direction is set to 0 in 2.5D case
        ((hydro_data->v2))[i]=0;
#endif
#else
#if GEOMETRY == CARTESIAN
        r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2)+pow(((hydro_data->r2))[i], 2));
        ((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
        ((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
        ((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#if GEOMETRY == SPHERICAL
        ((hydro_data->v0))[i]=vel;//rhat
        ((hydro_data->v1))[i]=0;//theta hat direction
        ((hydro_data->v2))[i]=0;
#endif
#if GEOMETRY == POLAR
        r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r2))[i], 2));
        ((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r; //need to figure this out
        ((hydro_data->v1))[i]=0;
        ((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#endif
        //fprintf(fPtr,"Gamma: %lf\nR: %lf\nPres: %e\nvel %lf\nX: %lf\nY %lf\nVx: %lf\nVy: %lf\nDens: %e\nLab_Dens: %e\nTemp: %lf\n", *(gamma+i), *(r+i), *(pres+i), vel, *(x+i), *(y+i), *(vx+i), *(vy+i), *(dens+i), *(dens_lab+i), *(temp+i));
    }
}

/*
 * Fill a hydro_dataframe with the angle-structured jet model of
 * Lundman, Peer & Ryde (2014): Lorentz factor falls off with angle as
 * eta = gamma_0/sqrt(1 + (theta/theta_j)^(2p)), clamped to 2 outside the
 * shear layer, with matching saturation radius, temperature and density
 * profiles. Used to compare MCRaT polarization against the paper.
 */
void structuredFireballPrep(struct hydro_dataframe *hydro_data, FILE *fPtr)
{
    //This model is provided by Lundman, Peer, Ryde 2014, use this to compare our MCRaT polarization to their polarizations
    double gamma_0=100, lumi=1e52, r00=1e8, theta_j=1e-2, p=4; //theta_j in paper is 1e-2, 3e-2, 1e-1 and p is 1,2,4
    double T_0=pow(lumi/(4*M_PI*r00*r00*A_RAD*C_LIGHT), 1.0/4.0);
    double eta=0, r_sat=0, r;
    double vel=0, theta_ratio=0;
    int i=0;

    for (i=0; i<hydro_data->num_elements; i++)
    {
        theta_ratio=((hydro_data->theta)[i])/theta_j;
        eta=gamma_0/sqrt(1+pow(theta_ratio, 2*p));
        if ((hydro_data->theta)[i] >= theta_j*pow(gamma_0/2, 1.0/p))
        {
            //*(gamma+i)=2; //outside with of shear layer have gamma be 2 like in paper
            eta=2.0;
        }
        r_sat=eta*r00;
        if (((hydro_data->r)[i]) >= r_sat)
        {
            (hydro_data->gamma)[i]=eta;
            (hydro_data->temp)[i]=T_0*pow(r_sat/((hydro_data->r)[i]), 2.0/3.0)/eta;
        }
        else
        {
            (hydro_data->gamma)[i]=((hydro_data->r)[i])/r_sat; //not sure if this is right but it shouldn't matter since we're injecting our photons far from r00
            (hydro_data->temp)[i]=T_0;
        }
        vel=sqrt(1-(pow((hydro_data->gamma)[i], -2.0)));
        (hydro_data->dens)[i] = M_P*lumi/(4*M_PI*M_P*C_LIGHT*C_LIGHT*C_LIGHT*eta*vel*((hydro_data->gamma)[i])*((hydro_data->r)[i])*((hydro_data->r)[i])); //equation paper has extra c, but then units dont work out
        (hydro_data->dens_lab)[i]=((hydro_data->dens)[i])*((hydro_data->gamma)[i]);
        (hydro_data->pres)[i]=(A_RAD*pow((hydro_data->temp)[i], 4.0))/(3);
#if DIMENSIONS == TWO || DIMENSIONS == TWO_POINT_FIVE
#if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
        r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2));
        ((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
        ((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
#endif
#if GEOMETRY == SPHERICAL
        ((hydro_data->v0))[i]=vel;//rhat
        ((hydro_data->v1))[i]=0;//theta hat direction
#endif
#if DIMENSIONS == TWO_POINT_FIVE
        //have to make sure that the 3rd vctro direction is set to 0 in 2.5D case
        ((hydro_data->v2))[i]=0;
#endif
#else
#if GEOMETRY == CARTESIAN
        r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r1))[i], 2)+pow(((hydro_data->r2))[i], 2));
        ((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
        ((hydro_data->v1))[i]=(vel*(((hydro_data->r1))[i]))/r; //geometry dependent want this to be radial
        ((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#if GEOMETRY == SPHERICAL
        ((hydro_data->v0))[i]=vel;//rhat
        ((hydro_data->v1))[i]=0;//theta hat direction
        ((hydro_data->v2))[i]=0;
#endif
#if GEOMETRY == POLAR
        r=sqrt(pow(((hydro_data->r0))[i], 2)+ pow(((hydro_data->r2))[i], 2));
        ((hydro_data->v0))[i]=(vel*(((hydro_data->r0))[i]))/r;
        ((hydro_data->v1))[i]=0;
        ((hydro_data->v2))[i]=(vel*(((hydro_data->r2))[i]))/r;
#endif
#endif
        //fprintf(fPtr,"eta: %lf\nr_sat: %lf\nGamma: %lf\nR: %lf\nTheta: %lf\nPres: %e\nvel %lf\nX: %lf\nY %lf\nVx: %lf\nVy: %lf\nDens: %e\nLab_Dens: %e\nTemp: %lf\n\n", eta, r_sat, *(gamma+i), *(r+i), (*(theta+i)), *(pres+i), vel, *(x+i), *(y+i), *(vx+i), *(vy+i), *(dens+i), *(dens_lab+i), *(temp+i));
    }
}

/*
 * Find the min/max radius (*min, *max) and min/max polar angle
 * (*min_theta, *max_theta) over all photons with nonzero weight.
 * Parallelized with OpenMP min/max reductions.
 */
void phMinMax(struct photon *ph, int ph_num, double *min, double *max, double *min_theta, double *max_theta, FILE *fPtr)
{
    double temp_r_max=0, temp_r_min=DBL_MAX, temp_theta_max=0, temp_theta_min=DBL_MAX;
    int i=0;
#if defined(_OPENMP)
    int num_thread=omp_get_num_threads();
#endif
    double ph_r=0, ph_theta=0;

#pragma omp parallel for num_threads(num_thread) firstprivate(ph_r, ph_theta) reduction(min:temp_r_min) reduction(max:temp_r_max) reduction(min:temp_theta_min) reduction(max:temp_theta_max)
    for (i=0;i<ph_num;i++)
    {
        if ((ph+i)->weight != 0)
        {
            ph_r=sqrt(((ph+i)->r0)*((ph+i)->r0) + ((ph+i)->r1)*((ph+i)->r1) + ((ph+i)->r2)*((ph+i)->r2));
            ph_theta=acos(((ph+i)->r2) /ph_r); //this is the photons theta psition in the FLASH grid, gives in radians
            if (ph_r > temp_r_max )
            {
                temp_r_max=ph_r;
                //fprintf(fPtr, "The new max is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);
            }
            //if ((i==0) || (ph_r<temp_r_min))
            if (ph_r<temp_r_min)
            {
                temp_r_min=ph_r;
                //fprintf(fPtr, "The new min is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_min, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);
            }
            if (ph_theta > temp_theta_max )
            {
                temp_theta_max=ph_theta;
                //fprintf(fPtr, "The new max is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);
            }
            //if ((i==0) || (ph_r<temp_r_min))
            if (ph_theta<temp_theta_min)
            {
                temp_theta_min=ph_theta;
                //fprintf(fPtr, "The new min is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_min, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2);
            }
        }
    }
    *max=temp_r_max;
    *min=temp_r_min;
    *max_theta=temp_theta_max;
    *min_theta=temp_theta_min;
}
GB_unop__identity_uint64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_uint64_fp64
// op(A') function: GB_unop_tran__identity_uint64_fp64

// C type:   uint64_t
// A type:   double
// cast:     uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (saturating double -> uint64_t conversion, handled by GB_cast_to_uint64_t)
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \
    Cx [pC] = z ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint64_fp64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // elementwise, embarrassingly parallel: Cx [p] = cast (Ax [p])
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by the included template, which
    // expands the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pq_filter.h
/* * pq_filter.h * * Created on: Jul 16, 2014 * Author: dariuss * * * Skyline filter based on priority queue */ #ifndef PQ_FILTER_H_ #define PQ_FILTER_H_ #include <queue> #include <vector> #if defined(_OPENMP) #include <omp.h> #include <parallel/algorithm> #else #include <algorithm> #define omp_get_thread_num() 0 #define omp_set_num_threads( t ) 0 #endif #include <common/pq_filter.h> #include "common/common.h" using namespace std; typedef std::pair<uint32_t, float> mn_w_idx; struct PQComparator { bool operator()( const mn_w_idx &a, const mn_w_idx &b ) { return a.second < b.second; } }; typedef priority_queue<mn_w_idx, vector<mn_w_idx>, PQComparator> PQ; class PQFilter { public: /* * Executes priority queue based filtering on data using num_threads * queues each of pq_size. * * Side affect: simultaneously computes Manhattan norm in TUPLE.score. */ template<typename T> static uint32_t Execute( T* data, const uint32_t n, const uint32_t pq_size, const uint32_t num_threads ); }; // Templated static function has to be defined in a header file.. template<typename T> uint32_t PQFilter::Execute( T* data, const uint32_t n, const uint32_t pq_size, const uint32_t num_threads ) { PQ * const PQs_ = new PQ[num_threads]; /* Init all threads to first q_size points and score them. */ for (uint32_t i = 0; i < pq_size; ++i) { data[i].score = 0; for (uint32_t j = 0; j < NUM_DIMS; ++j) { data[i].score += data[i].elems[j]; } for (uint32_t j = 0; j < num_threads; ++j) { PQs_[j].push( mn_w_idx( i, data[i].score ) ); } } /* Computing top man norm scores and remember best q_size ones. */ #pragma omp parallel num_threads(num_threads) { const uint32_t th_id = omp_get_thread_num(); mn_w_idx worst_of_bests = PQs_[th_id].top(); #pragma omp for nowait for (uint32_t i = 0; i < n; ++i) { float sum = 0; for (uint32_t j = 0; j < NUM_DIMS; j++) { sum += data[i].elems[j]; } data[i].score = sum; /* Compare to best found man norms for this thread. 
*/ if ( worst_of_bests.second > sum ) { PQs_[th_id].pop(); PQs_[th_id].push( mn_w_idx( i, sum ) ); worst_of_bests = PQs_[th_id].top(); } } } // END PARALLEL FOR /* Take top pruners and merge them into one set. */ vector<uint32_t> pruners; pruners.reserve( num_threads * pq_size ); for (uint32_t i = 0; i < num_threads; ++i) { while ( !PQs_[i].empty() ) { mn_w_idx top = PQs_[i].top(); pruners.push_back( top.first ); PQs_[i].pop(); } } // UPD_PROFILER( "01 calc mns" ); /* Pre-filter dataset using top pruners. */ #pragma omp parallel for for (uint32_t i = 0; i < n; ++i) { for (vector<uint32_t>::iterator it = pruners.begin(); it != pruners.end(); ++it) { if ( DominateLeft( data[*it], data[i] ) ) { data[i].markPruned(); break; } } } // END PARALLEL FOR /* Determine how many points were pruned. */ uint32_t new_n = n; for (uint32_t i = 0; i < new_n; ++i) { if ( data[i].isPruned() ) { data[i--] = data[--new_n]; } } //#ifndef NVERBOSE // printf( " pq_filter: %0.2f %% pruned\n", (n - new_n) / (double) n * 100.0 ); //#endif return new_n; } #endif /* PQ_FILTER_H_ */
setup_DF.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <libgen.h>
#include <omp.h>

#include "utils.h"
#include "libCMS.h"
#include "TinyDFT_typedef.h"
#include "setup_DF.h"

// Load the density-fitting (DF) auxiliary basis set and allocate the arrays
// that describe it (shell -> basis-function maps, screening buffers, the
// Coulomb metric matrix Jpq and the per-thread J work buffer).
// df_bas_fname: DF basis file; xyz_fname: molecular geometry file;
// save_mem == 1 lets pqA and df_tensor share one buffer later on.
static void TinyDFT_load_DF_basis(TinyDFT_p TinyDFT, char *df_bas_fname, char *xyz_fname, const int save_mem)
{
    int nthread = TinyDFT->nthread;
    CMS_createBasisSet(&(TinyDFT->df_basis));
    CMS_loadChemicalSystem(TinyDFT->df_basis, df_bas_fname, xyz_fname);
    CMS_Simint_setup_DF(TinyDFT->simint, TinyDFT->df_basis);
    TinyDFT->df_save_mem = (save_mem == 1) ? 1 : 0;
    // NOTE(review): basename() may modify its argument and the returned
    // pointer aliases df_bas_fname's storage -- caller must keep it alive.
    TinyDFT->df_bas_name = basename(df_bas_fname);
    TinyDFT->df_nbf = CMS_getNumFuncs (TinyDFT->df_basis);
    TinyDFT->df_nshell = CMS_getNumShells(TinyDFT->df_basis);
    printf("Density fitting information:\n");
    printf(" DF basis set = %s\n", TinyDFT->df_bas_name);
    printf(" # DF shells = %d\n", TinyDFT->df_nshell);
    printf(" # DF basis functions = %d\n", TinyDFT->df_nbf);

    int nbf = TinyDFT->nbf;
    int df_nshell = TinyDFT->df_nshell;
    int mat_size = TinyDFT->mat_size;
    // Shell/basis-function index maps and screening work arrays
    TinyDFT->df_shell_bf_sind = (int*) malloc_aligned(INT_MSIZE * (df_nshell + 1), 64);
    TinyDFT->df_shell_bf_num = (int*) malloc_aligned(INT_MSIZE * df_nshell, 64);
    TinyDFT->bf_pair_mask = (int*) malloc_aligned(INT_MSIZE * mat_size, 64);
    TinyDFT->bf_pair_j = (int*) malloc_aligned(INT_MSIZE * mat_size, 64);
    TinyDFT->bf_pair_diag = (int*) malloc_aligned(INT_MSIZE * nbf, 64);
    TinyDFT->bf_mask_displs = (int*) malloc_aligned(INT_MSIZE * (nbf + 1), 64);
    TinyDFT->df_sp_scrval = (double*) malloc_aligned(DBL_MSIZE * df_nshell, 64);
    assert(TinyDFT->df_shell_bf_sind != NULL);
    assert(TinyDFT->df_shell_bf_num != NULL);
    assert(TinyDFT->bf_pair_mask != NULL);
    assert(TinyDFT->bf_pair_j != NULL);
    assert(TinyDFT->bf_pair_diag != NULL);
    assert(TinyDFT->bf_mask_displs != NULL);
    assert(TinyDFT->df_sp_scrval != NULL);
    // Track total allocated bytes for the final memory report
    TinyDFT->mem_size += (double) (INT_MSIZE * (df_nshell * 2 + 1));
    TinyDFT->mem_size += (double) (INT_MSIZE * (nbf * 2 + 1));
    TinyDFT->mem_size += (double) (INT_MSIZE * 2 * mat_size);
    TinyDFT->mem_size += (double) (DBL_MSIZE * df_nshell);

    // df_shell_bf_sind[i] = first basis function of DF shell i;
    // the extra trailing entry makes "end" lookups branch-free.
    for (int i = 0; i < TinyDFT->df_nshell; i++)
    {
        TinyDFT->df_shell_bf_sind[i] = CMS_getFuncStartInd(TinyDFT->df_basis, i);
        TinyDFT->df_shell_bf_num[i] = CMS_getShellDim (TinyDFT->df_basis, i);
    }
    TinyDFT->df_shell_bf_sind[TinyDFT->df_nshell] = TinyDFT->df_nbf;

    int n_occ = TinyDFT->n_occ;
    int df_nbf = TinyDFT->df_nbf;
    // Round df_nbf up to a multiple of 16 (SIMD-friendly row stride)
    int df_nbf_16 = (df_nbf + 15) / 16 * 16;
    TinyDFT->df_nbf_16 = df_nbf_16;
    size_t temp_J_msize = (size_t) df_nbf_16 * (size_t) nthread;
    size_t temp_K_msize = (size_t) df_nbf * (size_t) n_occ * (size_t) nbf;
    size_t df_mat_msize = (size_t) df_nbf * (size_t) df_nbf;
    df_mat_msize *= DBL_MSIZE;
    temp_J_msize *= DBL_MSIZE;
    temp_K_msize *= DBL_MSIZE;
    TinyDFT->Jpq = (double*) malloc_aligned(df_mat_msize, 64);
    TinyDFT->temp_J = (double*) malloc_aligned(temp_J_msize, 64);
    //TinyDFT->temp_K = (double*) malloc_aligned(temp_K_msize, 64);
    TinyDFT->temp_K = NULL; // Allocate it when needed
    assert(TinyDFT->Jpq != NULL);
    assert(TinyDFT->temp_J != NULL);
    //assert(TinyDFT->temp_K != NULL);
    TinyDFT->mem_size += (double) df_mat_msize;
    TinyDFT->mem_size += (double) temp_J_msize;
    //TinyDFT->mem_size += (double) temp_K_msize;
    // pqA / df_tensor are sized after screening, in TinyDFT_prepare_DF_sparsity()
    TinyDFT->pqA = NULL;
    TinyDFT->df_tensor = NULL;
}

// Precompute the argument arrays for batched dgemm used when building the
// exchange matrix K: the nbf x nbf matrix is tiled into mat_K_BS-sized blocks
// and the upper-triangular tiles are grouped into (full, edge, corner) groups.
static void TinyDFT_init_batch_dgemm(TinyDFT_p TinyDFT)
{
    #define DGEMM_BLK_SIZE 64
    int nbf = TinyDFT->nbf;
    // Block size: ~1/10 of nbf, but at least DGEMM_BLK_SIZE
    int mat_K_BS = nbf / 10;
    if (mat_K_BS < DGEMM_BLK_SIZE) mat_K_BS = DGEMM_BLK_SIZE;
    int nblock = (nbf + mat_K_BS - 1) / mat_K_BS;
    int nblock0 = nbf / mat_K_BS;
    int bs_rem = nbf % mat_K_BS;
    // Number of tiles in the upper triangle (including diagonal)
    int ntile = (nblock + 1) * nblock / 2;
    TinyDFT->mat_K_ntile = ntile;
    TinyDFT->mat_K_BS = mat_K_BS;
    // NOTE(review): only group_size[0..2] are used below, but nbf ints are
    // allocated -- presumably a deliberate over-allocation; verify.
    TinyDFT->mat_K_group_size = (int*) malloc(sizeof(int) * nbf);
    int *group_size = &TinyDFT->mat_K_group_size[0];
    // Group 0: full BS x BS tiles; group 1: right-edge tiles; group 2: corner
    group_size[0] = (nblock0 * (nblock0 + 1)) / 2;
    if (bs_rem > 0)
    {
        group_size[1] = nblock0;
        group_size[2] = 1;
    } else {
        group_size[1] = 0;
        group_size[2] = 0;
    }

    // Per-tile cblas_dgemm_batch argument arrays (one slot per tile)
    TinyDFT->mat_K_transa = (CBLAS_TRANSPOSE*) malloc(sizeof(CBLAS_TRANSPOSE) * nbf);
    TinyDFT->mat_K_transb = (CBLAS_TRANSPOSE*) malloc(sizeof(CBLAS_TRANSPOSE) * nbf);
    TinyDFT->mat_K_m = (int*) malloc(sizeof(int) * nbf);
    TinyDFT->mat_K_n = (int*) malloc(sizeof(int) * nbf);
    TinyDFT->mat_K_k = (int*) malloc(sizeof(int) * nbf);
    TinyDFT->mat_K_alpha = (double*) malloc(sizeof(double) * nbf);
    TinyDFT->mat_K_beta = (double*) malloc(sizeof(double) * nbf);
    TinyDFT->mat_K_a = (double**) malloc(sizeof(double*) * nbf);
    TinyDFT->mat_K_b = (double**) malloc(sizeof(double*) * nbf);
    TinyDFT->mat_K_c = (double**) malloc(sizeof(double*) * nbf);
    TinyDFT->mat_K_lda = (int*) malloc(sizeof(int) * nbf);
    TinyDFT->mat_K_ldb = (int*) malloc(sizeof(int) * nbf);
    TinyDFT->mat_K_ldc = (int*) malloc(sizeof(int) * nbf);
    assert(TinyDFT->mat_K_transa != NULL);
    assert(TinyDFT->mat_K_transb != NULL);
    assert(TinyDFT->mat_K_m != NULL);
    assert(TinyDFT->mat_K_n != NULL);
    assert(TinyDFT->mat_K_k != NULL);
    assert(TinyDFT->mat_K_alpha != NULL);
    assert(TinyDFT->mat_K_beta != NULL);
    assert(TinyDFT->mat_K_a != NULL);
    assert(TinyDFT->mat_K_b != NULL);
    assert(TinyDFT->mat_K_c != NULL);
    assert(TinyDFT->mat_K_lda != NULL);
    assert(TinyDFT->mat_K_ldb != NULL);
    assert(TinyDFT->mat_K_ldc != NULL);
    TinyDFT->mem_size += (double) (sizeof(CBLAS_TRANSPOSE) * nbf * 2);
    TinyDFT->mem_size += (double) (INT_MSIZE * nbf * 7);
    TinyDFT->mem_size += (double) (DBL_MSIZE * nbf * 5);
}

// Screen all (mu,nu) basis-function pairs against the DF shell-pair screening
// values, build the compressed pair index (bf_pair_mask / bf_pair_j /
// bf_mask_displs, a CSR-like layout), and allocate the 3-center tensor
// pqA (and df_tensor, unless df_save_mem lets them share storage).
static void TinyDFT_prepare_DF_sparsity(TinyDFT_p TinyDFT)
{
    double st = get_wtime_sec();
    int nbf = TinyDFT->nbf;
    int n_occ = TinyDFT->n_occ;
    int df_nbf = TinyDFT->df_nbf;
    int mat_size = TinyDFT->mat_size;
    int *bf_pair_mask = TinyDFT->bf_pair_mask;
    int *bf_pair_j = TinyDFT->bf_pair_j;
    int *bf_pair_diag = TinyDFT->bf_pair_diag;
    int *bf_mask_displs = TinyDFT->bf_mask_displs;
    double *bf_pair_scrval = TinyDFT->bf_pair_scrval;

    // Find the maximum screen value in density fitting shell pairs
    double max_df_scrval = 0;
    for (int i = 0; i < TinyDFT->df_nshell; i++)
    {
        double df_scrval = CMS_Simint_get_DF_sp_scrval(TinyDFT->simint, i);
        TinyDFT->df_sp_scrval[i] = df_scrval;
        if (df_scrval > max_df_scrval) max_df_scrval = df_scrval;
    }
    TinyDFT->max_df_scrval = max_df_scrval;

    // Screen all basis function pairs for DF: pair (i,j) survives when its
    // Schwarz-style screening value exceeds eta = scrtol2 / max_df_scrval
    double eta = TinyDFT->shell_scrtol2 / max_df_scrval;
    int bf_pair_nnz = 0;
    bf_mask_displs[0] = 0;
    for (int i = 0; i < nbf; i++)
    {
        int offset_i = i * nbf;
        for (int j = 0; j < nbf; j++)
        {
            if (bf_pair_scrval[offset_i + j] > eta)
            {
                // bf_pair_mask maps dense (i,j) -> compressed row index, or -1
                bf_pair_mask[offset_i + j] = bf_pair_nnz;
                bf_pair_j[bf_pair_nnz] = j;
                bf_pair_nnz++;
            } else {
                bf_pair_mask[offset_i + j] = -1;
            }
        }
        // (i, i) always survives screening
        bf_pair_diag[i] = bf_pair_mask[offset_i + i];
        bf_mask_displs[i + 1] = bf_pair_nnz;
    }
    double bf_pair_density = 100.0 * (double) bf_pair_nnz / (double) mat_size;
    double et = get_wtime_sec();
    double ut = et - st;
    printf("TinyDFT handling shell pair sparsity over, elapsed time = %.3lf (s)\n", ut);

    // Allocate pqA (3-center integrals) and df_tensor (fitted tensor);
    // in save-mem mode they alias the same buffer and the transform is
    // performed in place, block by block, in TinyDFT_build_DF_tensor().
    int df_save_mem = TinyDFT->df_save_mem;
    size_t tensor_msize = (size_t) bf_pair_nnz * (size_t) df_nbf * DBL_MSIZE;
    double axu_array_MB = (double) df_nbf * (double) n_occ * (double) nbf * DBL_MSIZE / 1048576.0;
    double df_tensor_MB = tensor_msize / 1048576.0;
    st = get_wtime_sec();
    if (df_save_mem == 0)
    {
        TinyDFT->pqA = (double*) malloc_aligned(tensor_msize, 64);
        TinyDFT->df_tensor = (double*) malloc_aligned(tensor_msize, 64);
        assert(TinyDFT->pqA != NULL);
        assert(TinyDFT->df_tensor != NULL);
        TinyDFT->mem_size += (double) tensor_msize * 2;
        axu_array_MB += df_tensor_MB;
    } else {
        TinyDFT->pqA = (double*) malloc_aligned(tensor_msize, 64);
        assert(TinyDFT->pqA != NULL);
        TinyDFT->df_tensor = TinyDFT->pqA;
        TinyDFT->mem_size += (double) tensor_msize;
    }
    et = get_wtime_sec();
    ut = et - st;
    printf("TinyDFT DF memory allocation over, elapsed time = %.3lf (s)\n", ut);
    printf("DF storage & auxiliary work buffer = %.2lf, %.2lf MB\n", df_tensor_MB, axu_array_MB);
    printf("DF screened basis function pairs: %d out of %d (density = %.2lf%%)\n", bf_pair_nnz, mat_size, bf_pair_density);
}

// Scatter a batch of computed 3-center ERIs into the compressed pqA tensor.
// For each DF shell P in P_list, copies the (iM,iN,P-functions) strip into
// the rows for pairs (iM,iN) and (iN,iM) -- pqA stores both orientations.
// nint is the stride (in doubles) between consecutive shells' results in ERIs.
static void copy_3center_integral_results(
    int npair, int *P_list, int nint, double *ERIs,
    int *df_shell_bf_sind, double *pqA, int *bf_pair_mask,
    int nbf, int df_nbf, int startM, int endM,
    int startN, int endN, int dimN
)
{
    for (int ipair = 0; ipair < npair; ipair++)
    {
        int P = P_list[ipair];
        int startP = df_shell_bf_sind[P];
        int dimP = df_shell_bf_sind[P + 1] - startP;
        size_t row_mem_size = sizeof(double) * dimP;
        double *ERI_ipair = ERIs + nint * ipair;
        for (int iM = startM; iM < endM; iM++)
        {
            int im = iM - startM;
            for (int iN = startN; iN < endN; iN++)
            {
                int in = iN - startN;
                double *eri_ptr = ERI_ipair + (im * dimN + in) * dimP;
                // NOTE(review): both (iM,iN) and (iN,iM) are assumed to have
                // survived screening here (indices != -1) -- verify against
                // the valid_sp pair construction.
                int iMN_pair_idx = bf_pair_mask[iM * nbf + iN];
                int iNM_pair_idx = bf_pair_mask[iN * nbf + iM];
                size_t pqA_offset0 = (size_t) iMN_pair_idx * (size_t) df_nbf + (size_t) startP;
                size_t pqA_offset1 = (size_t) iNM_pair_idx * (size_t) df_nbf + (size_t) startP;
                double *pqA_ptr0 = pqA + pqA_offset0;
                double *pqA_ptr1 = pqA + pqA_offset1;
                memcpy(pqA_ptr0, eri_ptr, row_mem_size);
                memcpy(pqA_ptr1, eri_ptr, row_mem_size);
            }
        }
    }
}

// Compute all surviving 3-center integrals (mu nu | P) with Simint and store
// them in pqA. Work is parallelized over valid (M,N) shell pairs; DF shells P
// are batched per angular momentum class in chunks of _SIMINT_NSHELL_SIMD.
static void TinyDFT_calc_DF_3center_int(TinyDFT_p TinyDFT)
{
    int nbf = TinyDFT->nbf;
    int nthread = TinyDFT->nthread;
    int df_nbf = TinyDFT->df_nbf;
    int nshell = TinyDFT->nshell;
    int num_valid_sp = TinyDFT->num_valid_sp;
    int df_max_am = TinyDFT->simint->df_max_am;
    int *shell_bf_sind = TinyDFT->shell_bf_sind;
    int *bf_pair_mask = TinyDFT->bf_pair_mask;
    int *valid_sp_lid = TinyDFT->valid_sp_lid;
    int *valid_sp_rid = TinyDFT->valid_sp_rid;
    int *df_shell_bf_sind = TinyDFT->df_shell_bf_sind;
    int *df_am_shell_spos = TinyDFT->simint->df_am_shell_spos;
    int *df_am_shell_id = TinyDFT->simint->df_am_shell_id;
    double scrtol2 = TinyDFT->shell_scrtol2;
    double *pqA = TinyDFT->pqA;
    double *sp_scrval = TinyDFT->sp_scrval;
    double *df_sp_scrval = TinyDFT->df_sp_scrval;
    Simint_p simint = TinyDFT->simint;

    // One P-shell batch buffer per thread
    int *P_lists = (int*) malloc(sizeof(int) * _SIMINT_NSHELL_SIMD * nthread);
    assert(P_lists != NULL);

    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int nint, npair;
        int *thread_P_list = P_lists + tid * _SIMINT_NSHELL_SIMD;
        double *thread_ERIs;
        void *multi_sp;
        CMS_Simint_create_multi_sp(&multi_sp);

        #pragma omp for schedule(dynamic)
        for (int iMN = 0; iMN < num_valid_sp; iMN++)
        {
            int M = valid_sp_lid[iMN];
            int N = valid_sp_rid[iMN];
            int startM = shell_bf_sind[M];
            int endM = shell_bf_sind[M + 1];
            int startN = shell_bf_sind[N];
            int endN = shell_bf_sind[N + 1];
            int dimN = endN - startN;
            double scrval0 = sp_scrval[M * nshell + N];
            // Simint batches require all P in a batch to share angular momentum
            for (int iAM = 0; iAM <= df_max_am; iAM++)
            {
                npair = 0;
                int iP_start = df_am_shell_spos[iAM];
                int iP_end = df_am_shell_spos[iAM + 1];
                for (int iP = iP_start; iP < iP_end; iP++)
                {
                    int P = df_am_shell_id[iP];
                    double scrval1 = df_sp_scrval[P];
                    // Schwarz-style screening: skip negligible (MN|P)
                    if (scrval0 * scrval1 < scrtol2) continue;
                    thread_P_list[npair] = P;
                    npair++;
                    // Flush a full batch
                    if (npair == _SIMINT_NSHELL_SIMD)
                    {
                        CMS_Simint_calc_DF_shellquartet_batch(
                            simint, tid, M, N, npair, thread_P_list,
                            &thread_ERIs, &nint, &multi_sp
                        );
                        if (nint > 0)
                        {
                            copy_3center_integral_results(
                                npair, thread_P_list, nint, thread_ERIs,
                                df_shell_bf_sind, pqA, bf_pair_mask,
                                nbf, df_nbf, startM, endM, startN, endN, dimN
                            );
                        }
                        npair = 0;
                    }
                }  // for (int iP = iP_start; iP < iP_end; iP++)
                // Flush the remaining partial batch for this AM class
                if (npair > 0)
                {
                    CMS_Simint_calc_DF_shellquartet_batch(
                        simint, tid, M, N, npair, thread_P_list,
                        &thread_ERIs, &nint, &multi_sp
                    );
                    if (nint > 0)
                    {
                        copy_3center_integral_results(
                            npair, thread_P_list, nint, thread_ERIs,
                            df_shell_bf_sind, pqA, bf_pair_mask,
                            nbf, df_nbf, startM, endM, startN, endN, dimN
                        );
                    }
                    npair = 0;
                }
            }  // for (int iAM = 0; iAM <= simint->df_max_am; iAM++)
        }  // for (int iMN = 0; iMN < TinyDFT->num_valid_sp; iMN++)
        CMS_Simint_free_multi_sp(multi_sp);
    }  // #pragma omp parallel
    free(P_lists);
}

// Compute the 2-center Coulomb metric matrix Jpq(P,Q) = (P|Q), exploiting
// symmetry (N >= M) and writing both triangles.
// NOTE(review): Jpq comes from malloc_aligned (not zeroed); shell pairs
// skipped by screening leave their Jpq entries uninitialized -- confirm the
// screening never actually skips here, or that downstream tolerates it.
static void TinyDFT_calc_DF_2center_int(TinyDFT_p TinyDFT)
{
    // Fast enough, need not to batch shell quartets
    int df_nbf = TinyDFT->df_nbf;
    int df_nshell = TinyDFT->df_nshell;
    int *df_shell_bf_sind = TinyDFT->df_shell_bf_sind;
    double scrtol2 = TinyDFT->shell_scrtol2;
    double *Jpq = TinyDFT->Jpq;
    double *df_sp_scrval = TinyDFT->df_sp_scrval;
    Simint_p simint = TinyDFT->simint;

    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int nint;
        double *ERIs;

        #pragma omp for schedule(dynamic)
        for (int M = 0; M < df_nshell; M++)
        {
            double scrval0 = df_sp_scrval[M];
            for (int N = M; N < df_nshell; N++)
            {
                double scrval1 = df_sp_scrval[N];
                if (scrval0 * scrval1 < scrtol2) continue;

                CMS_Simint_calc_DF_shellpair(simint, tid, M, N, &ERIs, &nint);
                if (nint <= 0) continue;

                int startM = df_shell_bf_sind[M];
                int endM = df_shell_bf_sind[M + 1];
                int startN = df_shell_bf_sind[N];
                int endN = df_shell_bf_sind[N + 1];
                int dimN = endN - startN;
                // Scatter the shell-pair block into both triangles of Jpq
                for (int iM = startM; iM < endM; iM++)
                {
                    int im = iM - startM;
                    for (int iN = startN; iN < endN; iN++)
                    {
                        int in = iN - startN;
                        double eri = ERIs[im * dimN + in];
                        Jpq[iM * df_nbf + iN] = eri;
                        Jpq[iN * df_nbf + iM] = eri;
                    }
                }
            }  // for (int N = i; N < df_nshell; N++)
        }  // for (int M = 0; M < df_nshell; M++)
    }  // #pragma omp parallel
}

// Replace TinyDFT->Jpq by (an equivalent of) Jpq^{-1/2}:
// first try a plain Cholesky + triangular inverse; if the resulting inverse
// factor has a tiny/negative diagonal (near-singular metric), fall back to a
// pivoted incomplete Cholesky (LAPACKE_dpstrf) of numerical rank `rank`.
// On return the old Jpq buffer is freed and replaced.
static void TinyDFT_calc_invsqrt_Jpq(TinyDFT_p TinyDFT)
{
    int df_nbf = TinyDFT->df_nbf;
    double *Jpq = TinyDFT->Jpq;

    /* // Don't use this stupid eigen-decomposition
    size_t df_mat_msize = DBL_MSIZE * df_nbf * df_nbf;
    double *tmp_mat0 = malloc_aligned(df_mat_msize, 64);
    double *tmp_mat1 = malloc_aligned(df_mat_msize, 64);
    double *df_eigval = malloc_aligned(DBL_MSIZE * df_nbf, 64);
    assert(tmp_mat0 != NULL && tmp_mat1 != NULL);
    // Diagonalize Jpq = U * S * U^T, the eigenvectors are stored in tmp_mat0
    memcpy(tmp_mat0, Jpq, df_mat_msize);
    LAPACKE_dsyev(LAPACK_ROW_MAJOR, 'V', 'U', df_nbf, tmp_mat0, df_nbf, df_eigval);
    // Apply inverse square root to eigenvalues to get the inverse square root of Jpq
    for (int i = 0; i < df_nbf; i++)
    {
        if (df_eigval[i] > 1e-14) df_eigval[i] = 1.0 / sqrt(df_eigval[i]);
        else df_eigval[i] = 0.0;
    }
    // Right multiply the S^{-1/2} to U
    #pragma omp parallel for
    for (int irow = 0; irow < df_nbf; irow++)
    {
        double *tmp_mat0_ptr = tmp_mat0 + irow * df_nbf;
        double *tmp_mat1_ptr = tmp_mat1 + irow * df_nbf;
        memcpy(tmp_mat1_ptr, tmp_mat0_ptr, DBL_MSIZE * df_nbf);
        for (int icol = 0; icol < df_nbf; icol++)
            tmp_mat0_ptr[icol] *= df_eigval[icol];
    }
    // Get Jpq^{-1/2} = U * S^{-1/2} * U', Jpq^{-1/2} is stored in Jpq
    cblas_dgemm(
        CblasRowMajor, CblasNoTrans, CblasTrans, df_nbf, df_nbf, df_nbf,
        1.0, tmp_mat0, df_nbf, tmp_mat1, df_nbf, 0.0, Jpq, df_nbf
    );
    free_aligned(tmp_mat0);
    free_aligned(tmp_mat1);
    free_aligned(df_eigval);
    */

    size_t df_nbf2 = df_nbf * df_nbf;
    size_t df_mat_msize = DBL_MSIZE * df_nbf2;
    double *Jpq_invsqrt = malloc_aligned(df_mat_msize, 64);
    assert(Jpq_invsqrt != NULL);
    // NOTE(review): this "#pragma omp for" is orphaned (no enclosing parallel
    // region), so the copy runs serially; also the loop counter is int while
    // df_nbf2 is size_t. Presumably "#pragma omp parallel for" was intended.
    #pragma omp for
    for (int i = 0; i < df_nbf2; i++) Jpq_invsqrt[i] = Jpq[i];

    // Jpq = L * L^T, then invert L in place: Jpq_invsqrt holds L^{-1}
    LAPACKE_dpotrf(LAPACK_ROW_MAJOR, 'L', df_nbf, Jpq_invsqrt, df_nbf);
    LAPACKE_dtrtri(LAPACK_ROW_MAJOR, 'L', 'N', df_nbf, Jpq_invsqrt, df_nbf);
    // Arbitrary large sentinel, larger than any realistic diagonal value
    double min_diag = 19241112;
    // Track the smallest diagonal of L^{-1} and mirror lower -> upper
    // triangle (producing L^{-T}) while zeroing the lower triangle
    #pragma omp parallel for schedule(dynamic) reduction(min: min_diag)
    for (int i = 0; i < df_nbf; i++)
    {
        size_t idx_ii = i * df_nbf + i;
        if (min_diag > Jpq_invsqrt[idx_ii]) min_diag = Jpq_invsqrt[idx_ii];
        for (int j = i + 1; j < df_nbf; j++)
        {
            size_t idx0 = i * df_nbf + j;
            size_t idx1 = j * df_nbf + i;
            Jpq_invsqrt[idx0] = Jpq_invsqrt[idx1];
            Jpq_invsqrt[idx1] = 0.0;
        }
    }
    // Well-conditioned metric: accept the plain Cholesky result
    if (min_diag > 1e-12)
    {
        free_aligned(Jpq);
        TinyDFT->Jpq = Jpq_invsqrt;
        return;
    } else {
        printf("Density fitting: chol(Jpq) has diagonal elements < 1e-12, switching to pivoted incomplete Cholesky\n");
    }

    int rank;
    int *piv = (int*) malloc(sizeof(int) * df_nbf);
    assert(piv != NULL);
    // Pivoted Cholesky decomposition P^T * Jpq * P = L * L^T
    LAPACKE_dpstrf(LAPACK_ROW_MAJOR, 'L', df_nbf, Jpq, df_nbf, piv, &rank, 1e-12);
    // Calculate L1^{-T}, L1 only contains the factorized part of L
    LAPACKE_dtrtri(LAPACK_ROW_MAJOR, 'L', 'N', rank, Jpq, df_nbf);
    // Mirror the rank x rank lower triangle to the upper triangle (L1^{-T})
    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < rank; i++)
    {
        for (int j = i + 1; j < rank; j++)
        {
            size_t idx0 = i * df_nbf + j;
            size_t idx1 = j * df_nbf + i;
            Jpq[idx0] = Jpq[idx1];
            Jpq[idx1] = 0.0;
        }
    }
    // Calculate P * [L_{1}^{-T} 0; 0 0] : scatter the first `rank` rows
    // according to the pivot permutation (piv is 1-based from LAPACK)
    #pragma omp parallel
    {
        #pragma omp for
        for (size_t i = 0; i < df_nbf2; i++) Jpq_invsqrt[i] = 0.0;
        #pragma omp for
        for (int k = 0; k < rank; k++)
        {
            double *src = Jpq + k * df_nbf;
            double *dst = Jpq_invsqrt + (piv[k] - 1) * df_nbf;
            memcpy(dst, src, sizeof(double) * rank);
        }
    }
    free_aligned(Jpq);
    free(piv);
    TinyDFT->Jpq = Jpq_invsqrt;
}

// Drive the DF tensor construction: 3-center integrals, 2-center metric,
// metric inverse square root, then df_tensor = pqA * Jpq^{-1/2}.
// In save-mem mode the final dgemm is done block-by-block in place
// (pqA and df_tensor alias), using a small copy buffer since dgemm cannot
// read and write the same matrix.
static void TinyDFT_build_DF_tensor(TinyDFT_p TinyDFT)
{
    double st, et;
    printf("---------- DF tensor construction ----------\n");

    // Calculate 3-center density fitting integrals
    st = get_wtime_sec();
    TinyDFT_calc_DF_3center_int(TinyDFT);
    et = get_wtime_sec();
    printf("* 3-center integral : %.3lf (s)\n", et - st);

    // Calculate the Coulomb metric matrix
    st = get_wtime_sec();
    TinyDFT_calc_DF_2center_int(TinyDFT);
    et = get_wtime_sec();
    printf("* 2-center integral : %.3lf (s)\n", et - st);

    // Factorize the Jpq
    st = get_wtime_sec();
    TinyDFT_calc_invsqrt_Jpq(TinyDFT);
    et = get_wtime_sec();
    printf("* matrix inv-sqrt : %.3lf (s)\n", et - st);

    // Form the density fitting tensor
    st = get_wtime_sec();
    int nbf = TinyDFT->nbf;
    int df_nbf = TinyDFT->df_nbf;
    int bf_pair_cnt = TinyDFT->bf_mask_displs[nbf];
    int df_save_mem = TinyDFT->df_save_mem;
    double *Jpq = TinyDFT->Jpq;
    double *pqA = TinyDFT->pqA;
    double *df_tensor = TinyDFT->df_tensor;
    // df_tensor(i, j, k) = dot(pqA(i, j, 1:df_nbf), Jpq_invsqrt(1:df_nbf, k))
    if (df_save_mem == 0)
    {
        cblas_dgemm(
            CblasRowMajor, CblasNoTrans, CblasNoTrans, bf_pair_cnt, df_nbf, df_nbf,
            1.0, pqA, df_nbf, Jpq, df_nbf, 0.0, df_tensor, df_nbf
        );
    } else {
        int row_blksize = 8192;
        size_t pqA_blk_msize = sizeof(double) * (size_t) row_blksize * (size_t) df_nbf;
        double *pqA_block = (double*) malloc(pqA_blk_msize);
        assert(pqA_block != NULL);
        // df_tensor(i, j, k) = dot(pqA(i, j, 1:df_nbf), Jpq_invsqrt(1:df_nbf, k))
        for (int srow = 0; srow < bf_pair_cnt; srow += row_blksize)
        {
            size_t pqA_offset = (size_t) srow * (size_t) df_nbf;
            double *pqA_ptr = pqA + pqA_offset;
            int nrow = (bf_pair_cnt - srow < row_blksize) ? (bf_pair_cnt - srow) : row_blksize;
            // Copy the block out first: dgemm output overwrites this input range
            #pragma omp parallel for schedule(static)
            for (int i = 0; i < nrow * df_nbf; i++) pqA_block[i] = pqA_ptr[i];
            cblas_dgemm(
                CblasRowMajor, CblasNoTrans, CblasNoTrans, nrow, df_nbf, df_nbf,
                1.0, pqA_block, df_nbf, Jpq, df_nbf, 0.0, pqA_ptr, df_nbf
            );
        }
        free(pqA_block);
        // df_tensor keeps pointing at the shared buffer; drop the pqA alias
        TinyDFT->pqA = NULL;
    }
    et = get_wtime_sec();
    printf("* build DF tensor : %.3lf (s)\n", et - st);

    printf("---------- DF tensor construction finished ----------\n");
}

// Set up density fitting
// Public entry point: load the DF basis, prepare batched-dgemm metadata and
// pair sparsity, then build the fitted 3-index tensor.
void TinyDFT_setup_DF(TinyDFT_p TinyDFT, char *df_bas_fname, char *xyz_fname, const int save_mem)
{
    assert(TinyDFT != NULL);

    TinyDFT_load_DF_basis(TinyDFT, df_bas_fname, xyz_fname, save_mem);
    TinyDFT_init_batch_dgemm(TinyDFT);
    TinyDFT_prepare_DF_sparsity(TinyDFT);
    TinyDFT_build_DF_tensor(TinyDFT);
    // Shell-pair data is only needed during tensor construction
    CMS_Simint_free_DF_shellpairs(TinyDFT->simint);
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
test2.c
int main() { int x; #pragma omp parallel { 0; if (1) { x = 0; 2; #pragma omp barrier x; 3; } else { 4; while (5) { 6; #pragma omp barrier 7; x = 10; #pragma omp barrier } 8; } 9; #pragma omp barrier 10; } }